blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
601a58e65541310880e10f036c051e58ddf089e2 | efe3c9ad40200e6a4cc54ade2867e455687eb11b | /home/migrations/0004_message.py | bb97cd54ee190824e8f4994f6e57f1580cb8bcbe | [
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain"
] | permissive | andrewhstead/stream-three-project | bec3b70b354b812d1a875ee4e305377038fe179b | 60e5f946455f12019a266b8231737435702ff95e | refs/heads/master | 2023-06-23T17:53:09.379297 | 2023-06-13T16:09:22 | 2023-06-13T16:09:22 | 126,410,294 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 879 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-04-23 23:14
from __future__ import unicode_literals
from django.db import migrations, models
import tinymce.models
class Migration(migrations.Migration):
    """Creates the home.Message model (sender, email, subject, rich-text body)."""
    initial = True
    dependencies = [
        ('home', '0003_delete_team'),
    ]
    operations = [
        migrations.CreateModel(
            name='Message',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sender', models.CharField(max_length=100)),
                ('email', models.EmailField(max_length=100)),
                # Stamped automatically when the row is first created.
                ('date_sent', models.DateTimeField(auto_now_add=True)),
                ('subject', models.CharField(max_length=100)),
                # Rich-text (HTML) body edited via TinyMCE; may be empty.
                ('message', tinymce.models.HTMLField(blank=True)),
            ],
        ),
    ]
| [
"andrew@andrewstead.co.uk"
] | andrew@andrewstead.co.uk |
de5bdd8d7521907a0d02b916dded40acdace4814 | bf99b1b14e9ca1ad40645a7423f23ef32f4a62e6 | /AtCoder/other/日立製作所_社会システム事業部_プログラミングコンテスト2020/c.py | f0dbb2903e9188f925ea9ea87e867040ab1f0e43 | [] | no_license | y-oksaku/Competitive-Programming | 3f9c1953956d1d1dfbf46d5a87b56550ff3ab3db | a3ff52f538329bed034d3008e051f30442aaadae | refs/heads/master | 2021-06-11T16:14:12.635947 | 2021-05-04T08:18:35 | 2021-05-04T08:18:35 | 188,639,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,161 | py | from collections import deque
import sys
input = sys.stdin.buffer.readline
N = int(input())
# Adjacency lists of an undirected tree on N vertices (input is 1-indexed,
# stored 0-indexed).
edges = [[] for _ in range(N)]
for _ in range(N - 1):
    fr, to = map(lambda a: int(a) - 1, input().split())
    edges[fr].append(to)
    edges[to].append(fr)
# BFS from vertex 0: dist[v] = depth of v (number of edges from the root).
dist = [10**10] * N  # sentinel "unvisited" distance
que = deque([(0, 0)])
while que:
    now, d = que.popleft()
    if dist[now] <= d:
        continue
    dist[now] = d
    for to in edges[now]:
        que.append((to, d + 1))
# Split vertices by depth parity; after the swap, A is the smaller side.
A = [i for i, d in enumerate(dist) if d % 2 == 0]
B = [i for i, d in enumerate(dist) if d % 2 == 1]
if len(A) > len(B):
    A, B = B, A
ans = [-1] * N
nums = set(range(1, N + 1))
if len(A) <= N // 3:
    # The multiples of 3 alone can cover the whole smaller side; hand the
    # leftover labels to the other side in arbitrary order.
    mul = 1
    for i in A:
        ans[i] = 3 * mul
        nums.remove(3 * mul)
        mul += 1
    nums = list(nums)
    for i, n in zip(B, nums):
        ans[i] = n
else:
    # Otherwise label side A with numbers ≡ 1 (mod 3) and side B with
    # numbers ≡ 2 (mod 3), spilling into multiples of 3 once a residue
    # class is exhausted.
    mul = 1
    for c, i in enumerate(A):
        if c * 3 + 1 > N:
            ans[i] = mul * 3
            mul += 1
        else:
            ans[i] = c * 3 + 1
    for c, i in enumerate(B):
        if c * 3 + 2 > N:
            ans[i] = mul * 3
            mul += 1
        else:
            ans[i] = c * 3 + 2
print(*ans)
| [
"y.oksaku@stu.kanazawa-u.ac.jp"
] | y.oksaku@stu.kanazawa-u.ac.jp |
c3b42d25f9116f1bf61fa704be8f0a121762c825 | 4e097df1d8ee1c864699ce917195aa79e6a78c24 | /backend/purple_fire_27872/urls.py | 6f28f3da85dcdbeecef932b43d280b8980e4bc0d | [] | no_license | crowdbotics-apps/purple-fire-27872 | 2ddbac1b9e0a640e80171d6dea3301de204e2a13 | d630e111e9144b698d3581fc45c0067a1d52c45c | refs/heads/master | 2023-05-15T07:36:11.511788 | 2021-06-09T13:01:26 | 2021-06-09T13:01:26 | 375,356,766 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,229 | py | """purple_fire_27872 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
    path("", include("home.urls")),
    path("accounts/", include("allauth.urls")),
    path("modules/", include("modules.urls")),
    path("api/v1/", include("home.api.v1.urls")),
    path("admin/", admin.site.urls),
    path("users/", include("users.urls", namespace="users")),
    path("rest-auth/", include("rest_auth.urls")),
    # Override email confirm to use allauth's HTML view instead of rest_auth's API view
    path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
    path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
# Branding for the Django admin site.
admin.site.site_header = "Purple Fire"
admin.site.site_title = "Purple Fire Admin Portal"
admin.site.index_title = "Purple Fire Admin"
# swagger
api_info = openapi.Info(
    title="Purple Fire API",
    default_version="v1",
    description="API documentation for Purple Fire App",
)
schema_view = get_schema_view(
    api_info,
    public=True,
    permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
    path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
# Serve the SPA shell for the root and for any otherwise-unmatched path;
# the catch-all must stay last so it does not shadow the routes above.
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
                TemplateView.as_view(template_name='index.html'))]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
786bbf41efc469014729778a19aca2a7ce6dc054 | c991da8bae5a74dec3e6400ca780206758b9840a | /old/Session002/DynamicProgramming/Triangle.py | 8e8fef6ae6c114c304f3abc1c5d8ea2d824c1bdf | [] | no_license | MaxIakovliev/algorithms | 0503baca3d35c8ad89eca8821c5b2928d805064b | 54d3d9530b25272d4a2e5dc33e7035c44f506dc5 | refs/heads/master | 2021-07-23T02:21:18.443979 | 2021-07-18T08:05:37 | 2021-07-18T08:05:37 | 45,613,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | class Solution:
"""
https://leetcode.com/problems/triangle/
solution:
https://leetcode.com/problems/triangle/discuss/38724/7-lines-neat-Java-Solution
"""
def minimumTotal(self, triangle: 'List[List[int]]') -> int:
dp=[0 for i in range(len(triangle)+1)]
for i in range(len(triangle)-1,-1,-1):
for j in range(len(triangle[i])):
dp[j]=min(dp[j],dp[j+1])+triangle[i][j]
return dp[0]
if __name__ == "__main__":
    # Smoke test: minimum path is 2 + 3 + 5 + 1 = 11.
    c=Solution()
    print(c.minimumTotal([
    [2],
    [3,4],
    [6,5,7],
    [4,1,8,3]
    ]))#11
| [
"max.iakovliev@gmail.com"
] | max.iakovliev@gmail.com |
f6180d6e48614c2a0d648ee7c5c04d9b51cdd379 | bb311256e15179e929b9fba277e16f67b1e674e5 | /backend/athlete_auction_28818/urls.py | 122f438761b809957bed0a2e6d02e7d31a115685 | [] | no_license | crowdbotics-apps/athlete-auction-28818 | bd14650fcf008eca4132ea44a8064e6d8ef93310 | 457aa0b49b2ac9c2d94e09b7cd6b07ba9a1644d5 | refs/heads/master | 2023-06-16T17:13:45.772189 | 2021-07-13T23:46:46 | 2021-07-13T23:46:46 | 385,762,299 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,253 | py | """athlete_auction_28818 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
    path("", include("home.urls")),
    path("accounts/", include("allauth.urls")),
    path("modules/", include("modules.urls")),
    path("api/v1/", include("home.api.v1.urls")),
    path("admin/", admin.site.urls),
    path("users/", include("users.urls", namespace="users")),
    path("rest-auth/", include("rest_auth.urls")),
    # Override email confirm to use allauth's HTML view instead of rest_auth's API view
    path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
    path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
# Branding for the Django admin site.
admin.site.site_header = "Athlete Auction"
admin.site.site_title = "Athlete Auction Admin Portal"
admin.site.index_title = "Athlete Auction Admin"
# swagger
api_info = openapi.Info(
    title="Athlete Auction API",
    default_version="v1",
    description="API documentation for Athlete Auction App",
)
schema_view = get_schema_view(
    api_info,
    public=True,
    permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
    path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
# Serve the SPA shell for the root and for any otherwise-unmatched path;
# the catch-all must stay last so it does not shadow the routes above.
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
                TemplateView.as_view(template_name='index.html'))]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
696ec13eb480eb65068ec5403f76bb30b5f0a8de | 71f00ed87cd980bb2f92c08b085c5abe40a317fb | /Data/GoogleCloud/google-cloud-sdk/lib/surface/ai_platform/models/list.py | 632e720d7f254e7d84e144a1789781bfd9835dff | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | factoryofthesun/Rao-NLP | 2bd8269a8eed1cb352c14c8fde88e3111ccca088 | 87f9723f5ee51bd21310d58c3425a2a7271ec3c5 | refs/heads/master | 2023-04-18T08:54:08.370155 | 2020-06-09T23:24:07 | 2020-06-09T23:24:07 | 248,070,291 | 0 | 1 | null | 2021-04-30T21:13:04 | 2020-03-17T20:49:03 | Python | UTF-8 | Python | false | false | 1,782 | py | # -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ai-platform models list command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.ml_engine import models
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.ml_engine import endpoint_util
from googlecloudsdk.command_lib.ml_engine import flags
from googlecloudsdk.command_lib.ml_engine import models_util
from googlecloudsdk.core import resources
_COLLECTION = 'ml.models'
_DEFAULT_FORMAT = """
table(
name.basename(),
defaultVersion.name.basename()
)
"""
def _GetUri(model):
  """Return the self-link URI for the given model resource message."""
  model_ref = resources.REGISTRY.ParseRelativeName(model.name,
                                                   models_util.MODELS_COLLECTION)
  return model_ref.SelfLink()
class List(base.ListCommand):
  """List existing AI Platform models."""
  @staticmethod
  def Args(parser):
    # Default output: table of model name and its default version; --uri
    # resolution goes through _GetUri.
    parser.display_info.AddFormat(_DEFAULT_FORMAT)
    parser.display_info.AddUriFunc(_GetUri)
    flags.GetRegionArg('model').AddToParser(parser)
  def Run(self, args):
    # Point the ML API client at the regional endpoint before listing.
    with endpoint_util.MlEndpointOverrides(region=args.region):
      return models_util.List(models.ModelsClient())
| [
"guanzhi97@gmail.com"
] | guanzhi97@gmail.com |
0ff085f57b4a9657055b933dc0bfe0597fef0fa4 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /QFXMcwaQZ8FTAuEtg_12.py | c3acc86a9fb4cf4cf4d78a239f5630f30554b163 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 548 | py | """
Create a function that takes a single character as an argument and returns the
char code of its lowercased / uppercased counterpart.
### Examples
Given that:
- "A" char code is: 65
- "a" char code is: 97
counterpartCharCode("A") ➞ 97
counterpartCharCode("a") ➞ 65
### Notes
* The argument will always be a single character.
* Not all inputs will have a counterpart (e.g. numbers), in which case return the inputs char code.
"""
def counterpartCharCode(char):
    """Return the char code of *char*'s opposite-case counterpart.

    Characters without a case counterpart (digits, punctuation) swap to
    themselves, so their own char code is returned.
    """
    return ord(char.swapcase())
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
413532b7ca0867d03a3b8a5fab67927dad30a7fc | 494c191e87ae52470b9eb5d38d4851db168ed7cc | /leetcode/0179_largest_number.py | 82d09525cd00c4f7825de9c78d6378b767fd839d | [] | no_license | Jeetendranani/yaamnotes | db67e5df1e2818cf6761ab56cf2778cf1860f75e | 1f859fb1d26ffeccdb847abebb0f77e9842d2ca9 | refs/heads/master | 2020-03-19T01:12:45.826232 | 2018-05-30T20:14:11 | 2018-05-30T20:14:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,063 | py | """
179. Largest Number
Given a list of non negative integers, arrange them such that they form the largest number.
For example, given [3, 30, 34, 5, 9], the largest formed number is 9534330.
Note the result may be every large, so you need to return a string instead of an integer.
Approach 1: Sorting via custom comparator
Intuition
To construct the largest number, we want to ensure that the most significant digits are occupied by the largest digits.
Algorithm:
First, we convert each integer to a string. Then, we sort the array of strings.
While it might be tempting to simply sort the numbers in descending order, this causes leetcode for set of numbers with
the same leading digits. For example, sorting the problem example in descending order would produce the number 9534303,
while the correct answer can be achieved by transposing the 3 and 30. Therefore, for each pairwise comparison during
the sort, we compare the numbers achieved by concatenating the pair in both orders. We can prove that this sorts into
the proper order as following:
Assume that (without loss of generality), for some pair of integers a and b, our comparator dictates that a should
preceding b in sorted order. This means that a_b > b_a (where _ represents concatenation). For the sort to produce an
incorrect ordering, there must be some c for which b precendes c and c precedes a, this is a contradiction because
a_b > b_a and b_c > c_b implies a_c > c _a. In other words, our custom comparator preserves transitivity, so the sort
is correct.
Once the array is sorted, the most "significant" number will be at the front. There is a minor edge case when the
array consists of only 0s, in which case we can simply return "0". Otherwise, we build a string out of the sorted
array and return it.
"""
class LargerNumKey(str):
    """String subclass whose ordering favors the larger concatenation.

    x < y  iff  x + y > y + x, so an ascending sort with this key puts the
    digit strings in the order that maximizes the concatenated number.
    """
    def __lt__(x, y):
        return x + y > y + x


class Solution:
    def lagest_number(self, nums):
        """Arrange non-negative integers to form the largest number.

        Returns the result as a string (it may not fit in an integer),
        e.g. [3, 30, 34, 5, 9] -> '9534330'. An empty input yields ''.
        """
        if not nums:
            return ''
        # BUG FIX: the original called max(str, nums), which raises a
        # TypeError in Python 3; map(str, nums) was clearly intended.
        largest_num = ''.join(sorted(map(str, nums), key=LargerNumKey))
        # All-zero inputs collapse to a single '0' (e.g. [0, 0] -> '0').
        return '0' if largest_num[0] == '0' else largest_num
"yunpeng-li@hotmail.com"
] | yunpeng-li@hotmail.com |
29c57beb7192eb32d1352e5ca01ba1687eed5ad9 | c8a04384030c3af88a8e16de4cedc4ef8aebfae5 | /stubs/pandas/tests/indexes/timedeltas/test_timedelta_range.pyi | 2d3cd837b31cc6f1546a327e09061dedc2bb2bb9 | [
"MIT"
] | permissive | Accern/accern-xyme | f61fce4b426262b4f67c722e563bb4297cfc4235 | 6ed6c52671d02745efabe7e6b8bdf0ad21f8762c | refs/heads/master | 2023-08-17T04:29:00.904122 | 2023-05-23T09:18:09 | 2023-05-23T09:18:09 | 226,960,272 | 3 | 2 | MIT | 2023-07-19T02:13:18 | 2019-12-09T20:21:59 | Python | UTF-8 | Python | false | false | 545 | pyi | # Stubs for pandas.tests.indexes.timedeltas.test_timedelta_range (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
# pylint: disable=unused-argument,redefined-outer-name,no-self-use,invalid-name
# pylint: disable=relative-beyond-top-level,line-too-long,arguments-differ
from typing import Any
class TestTimedeltas:
    """Type-stub signatures only; the test bodies live in the pandas test suite."""
    def test_timedelta_range(self) -> None:
        ...
    def test_linspace_behavior(self, periods: Any, freq: Any) -> None:
        ...
    def test_errors(self) -> None:
        ...
| [
"josua.krause@gmail.com"
] | josua.krause@gmail.com |
49c6ca0beb4a387dfc9bada06b432530f567f400 | bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d | /lib/googlecloudsdk/command_lib/dataproc/jobs/trino.py | 303c27738c721cac3724dfc2ee0bd9e9ac9e78be | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | google-cloud-sdk-unofficial/google-cloud-sdk | 05fbb473d629195f25887fc5bfaa712f2cbc0a24 | 392abf004b16203030e6efd2f0af24db7c8d669e | refs/heads/master | 2023-08-31T05:40:41.317697 | 2023-08-23T18:23:16 | 2023-08-23T18:23:16 | 335,182,594 | 9 | 2 | NOASSERTION | 2022-10-29T20:49:13 | 2021-02-02T05:47:30 | Python | UTF-8 | Python | false | false | 3,677 | py | # -*- coding: utf-8 -*- #
# Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for the Trino job."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import encoding
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.command_lib.dataproc.jobs import base as job_base
from googlecloudsdk.command_lib.dataproc.jobs import util as job_util
class TrinoBase(job_base.JobBase):
  """Submit a Trino job to a cluster."""
  @staticmethod
  def Args(parser):
    """Parses command line arguments specific to submitting Trino jobs."""
    # Exactly one query source is required: inline --execute queries or a
    # --file script URI.
    driver = parser.add_mutually_exclusive_group(required=True)
    driver.add_argument(
        '--execute',
        '-e',
        metavar='QUERY',
        dest='queries',
        action='append',
        default=[],
        help='A Trino query to execute.')
    driver.add_argument(
        '--file',
        '-f',
        help='HCFS URI of file containing the Trino script to execute.')
    parser.add_argument(
        '--properties',
        type=arg_parsers.ArgDict(),
        metavar='PARAM=VALUE',
        help='A list of key value pairs to set Trino session properties.')
    parser.add_argument(
        '--properties-file',
        help=job_util.PROPERTIES_FILE_HELP_TEXT)
    parser.add_argument(
        '--driver-log-levels',
        type=arg_parsers.ArgDict(),
        metavar='PACKAGE=LEVEL',
        help=('A list of package-to-log4j log level pairs to configure driver '
              'logging. For example: root=FATAL,com.example=INFO'))
    parser.add_argument(
        '--continue-on-failure',
        action='store_true',
        help='Whether to continue if a query fails.')
    parser.add_argument(
        '--query-output-format',
        help=('The query output display format. See the Trino documentation '
              'for supported output formats.'))
    parser.add_argument(
        '--client-tags',
        type=arg_parsers.ArgList(),
        metavar='CLIENT_TAG',
        help='A list of Trino client tags to attach to this query.')
  @staticmethod
  def GetFilesByType(args):
    """Returns a dict mapping file kind to the --file URI (may be None)."""
    return {'file': args.file}
  @staticmethod
  def ConfigureJob(messages, job, files_by_type, logging_config, args):
    """Populates the trinoJob member of the given job."""
    trino_job = messages.TrinoJob(
        continueOnFailure=args.continue_on_failure,
        queryFileUri=files_by_type['file'],
        loggingConfig=logging_config)
    # At most one of queries / queryFileUri is set (mutex group in Args).
    if args.queries:
      trino_job.queryList = messages.QueryList(queries=args.queries)
    if args.query_output_format:
      trino_job.outputFormat = args.query_output_format
    if args.client_tags:
      trino_job.clientTags = args.client_tags
    job_properties = job_util.BuildJobProperties(
        args.properties, args.properties_file)
    if job_properties:
      # Sort properties to ensure tests comparing messages not fail on ordering.
      trino_job.properties = encoding.DictToAdditionalPropertyMessage(
          job_properties, messages.TrinoJob.PropertiesValue, sort_items=True)
    job.trinoJob = trino_job
| [
"cloudsdk.mirror@gmail.com"
] | cloudsdk.mirror@gmail.com |
1aa77b0cf7ef09c20fc0e64eec1906052fe467e9 | cb14afc9864e370a17f21f4486a17c824fb10294 | /simple questions on loops and list comprehensions/Use a List Comprehension to create a list of all numbers between 1 and 50 that are divisible by 3.py | ec1abc951f14dacd5746142d8179b8e0ee50030d | [] | no_license | sandeepshiven/python-practice | 92130a1d34fe830433c0526b386ee4550a713d55 | 1bfa6145c5662231128a39fdfadf8db06f4b0958 | refs/heads/master | 2020-06-16T12:04:52.983978 | 2020-02-04T18:19:55 | 2020-02-04T18:19:55 | 195,565,480 | 0 | 1 | null | 2019-09-15T18:25:54 | 2019-07-06T17:21:17 | Python | UTF-8 | Python | false | false | 163 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 18 17:43:26 2019
@author: sandeep
"""
# Multiples of 3 between 1 and 50 (range's upper bound is exclusive, but
# the largest multiple of 3 below 50 is 48 either way).
lst = [x for x in range(1,50) if x%3 == 0]
print(lst) | [
"sandeepshiven0@gmail.com"
] | sandeepshiven0@gmail.com |
826b03c57e962e20dbce7975d779ddf393b8a6c0 | 6f8267e19ad9bf828432d34780e7dde92fed054b | /src/exp/expChooseView.py | 2da4f79ad80ba95a9f34f71807af50e884eeaf23 | [] | no_license | ravika/expresso | 3129b5227cfc664d2adbec8c768bea9751898e0b | 319380d25e2ca4fc6111651d8e1c7cd98ad44a25 | refs/heads/master | 2016-08-03T19:32:15.823161 | 2015-05-02T10:16:37 | 2015-05-02T10:16:37 | 35,533,945 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,198 | py | # -*- coding: utf-8 -*-
##############
# Written by : Jaley Dholakiya
# Video Analytics Lab,IISc
#############
# Form implementation generated from reading ui file 'expChooseView.ui'
#
# Created: Sat Mar 14 01:53:22 2015
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
import os
root = os.getenv('EXPRESSO_ROOT')
# PyQt4 compatibility shims: some builds lack QString.fromUtf8 and the
# UnicodeUTF8 encoding flag, so fall back to a pass-through converter and
# the 3-argument translate() overload.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(QtGui.QWidget):
    """Experiment-chooser screen (pyuic4-generated layout, hand-tweaked).

    Five option tiles, each a QPushButton with a QLabel caption below it.
    """
    def __init__(self,parent=None):
        super(Ui_Form,self).__init__(parent)
        self.setupUi(self)
    def setupUi(self, Form):
        """Create and position all widgets on *Form*."""
        Form.setObjectName(_fromUtf8("Form"))
        Form.resize(611, 591)
        Form.setStyleSheet(_fromUtf8("background-color:rgb(195,195,135);"))
        # Tile 1: extract features via a pre-trained net.
        self.widget = QtGui.QWidget(Form)
        self.widget.setGeometry(QtCore.QRect(50, 20, 171, 241))
        self.widget.setStyleSheet(_fromUtf8("background-color:rgb(195,195,135)"))
        self.widget.setObjectName(_fromUtf8("widget"))
        self.label = QtGui.QLabel(self.widget)
        self.label.setGeometry(QtCore.QRect(20, 170, 201, 71))
        self.label.setStyleSheet(_fromUtf8("font: 15pt \"Ubuntu Condensed\";color:rgb(45,60,45)"))
        self.label.setObjectName(_fromUtf8("label"))
        self.pushButton = QtGui.QPushButton(self.widget)
        self.pushButton.setGeometry(QtCore.QRect(10, 20, 141, 141))
        self.pushButton.setObjectName(_fromUtf8("pushButton"))
        # Tile 2: visualize deep network features.
        self.widget_2 = QtGui.QWidget(Form)
        self.widget_2.setGeometry(QtCore.QRect(230, 20, 171, 241))
        self.widget_2.setStyleSheet(_fromUtf8("background-color:rgb(195,195,135)"))
        self.widget_2.setObjectName(_fromUtf8("widget_2"))
        self.pushButton_2 = QtGui.QPushButton(self.widget_2)
        self.pushButton_2.setGeometry(QtCore.QRect(10, 20, 141, 141))
        self.pushButton_2.setObjectName(_fromUtf8("pushButton_2"))
        self.label_2 = QtGui.QLabel(self.widget_2)
        self.label_2.setGeometry(QtCore.QRect(10, 170, 151, 71))
        self.label_2.setStyleSheet(_fromUtf8("font: 15pt \"Ubuntu Condensed\";color:rgb(45,60,45)"))
        self.label_2.setObjectName(_fromUtf8("label_2"))
        # Tile 3: evaluate a pre-trained net.
        self.widget_3 = QtGui.QWidget(Form)
        self.widget_3.setGeometry(QtCore.QRect(410, 20, 171, 241))
        self.widget_3.setStyleSheet(_fromUtf8("background-color:rgb(195,195,135)"))
        self.widget_3.setObjectName(_fromUtf8("widget_3"))
        self.pushButton_3 = QtGui.QPushButton(self.widget_3)
        self.pushButton_3.setGeometry(QtCore.QRect(10, 20, 141, 141))
        self.pushButton_3.setObjectName(_fromUtf8("pushButton_3"))
        self.label_3 = QtGui.QLabel(self.widget_3)
        self.label_3.setGeometry(QtCore.QRect(10, 170, 151, 71))
        self.label_3.setStyleSheet(_fromUtf8("font: 15pt \"Ubuntu Condensed\";color:rgb(45,60,45)"))
        self.label_3.setObjectName(_fromUtf8("label_3"))
        # Tile 4: model weight surgery.
        # NOTE(review): widgets 4 and 5 reuse the object names
        # "widget_3"/"pushButton_3"/"label_3" — looks like a copy-paste
        # artifact; confirm before relying on findChild lookups.
        self.widget_4 = QtGui.QWidget(Form)
        self.widget_4.setGeometry(QtCore.QRect(230, 270, 171, 241))
        self.widget_4.setStyleSheet(_fromUtf8("background-color:rgb(195,195,135)"))
        self.widget_4.setObjectName(_fromUtf8("widget_3"))
        self.pushButton_4 = QtGui.QPushButton(self.widget_4)
        self.pushButton_4.setGeometry(QtCore.QRect(10, 20, 141, 141))
        self.pushButton_4.setObjectName(_fromUtf8("pushButton_3"))
        self.label_4 = QtGui.QLabel(self.widget_4)
        self.label_4.setGeometry(QtCore.QRect(10, 170, 151, 71))
        self.label_4.setStyleSheet(_fromUtf8("font: 15pt \"Ubuntu Condensed\";color:rgb(45,60,45)"))
        self.label_4.setObjectName(_fromUtf8("label_3"))
        # Tile 5: evaluate a pre-trained SVM.
        self.widget_5 = QtGui.QWidget(Form)
        self.widget_5.setGeometry(QtCore.QRect(50, 270, 171, 241))
        self.widget_5.setStyleSheet(_fromUtf8("background-color:rgb(195,195,135)"))
        self.widget_5.setObjectName(_fromUtf8("widget_3"))
        self.pushButton_5 = QtGui.QPushButton(self.widget_5)
        self.pushButton_5.setGeometry(QtCore.QRect(10, 20, 141, 141))
        self.pushButton_5.setObjectName(_fromUtf8("pushButton_3"))
        self.label_5 = QtGui.QLabel(self.widget_5)
        self.label_5.setGeometry(QtCore.QRect(10, 170, 151, 71))
        self.label_5.setStyleSheet(_fromUtf8("font: 15pt \"Ubuntu Condensed\";color:rgb(45,60,45)"))
        self.label_5.setObjectName(_fromUtf8("label_3"))
        self.widget_4.hide() # tile 4 hidden pending a decision on whether to keep it
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)
    def retranslateUi(self, Form):
        """Set all user-visible strings and load the button icons."""
        Form.setWindowTitle(_translate("Form", "Form", None))
        self.label.setText(_translate("Form", "Extract Features\n"
"via pre-trained net", None))
        self.setPushButtonIcons()
        self.label_2.setText(_translate("Form", "Visuallize deep\n"
"network Features", None))
        self.label_3.setText(_translate("Form", "Evaluate \n"
"pre-trained Net", None))
        self.label_4.setText(_translate("Form", "Model Weight \n"
"Surgery", None))
        self.label_5.setText(_translate("Form", "Evaluate \n"
"pre-trained SVM", None))
    def setPushButtonIcons(self):
        """Load each tile's icon from $EXPRESSO_ROOT/res/exp."""
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(_fromUtf8(root+"/res/exp/extractFeatures.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.pushButton.setIcon(icon)
        self.pushButton.setIconSize(QtCore.QSize(141,141))
        icon1 = QtGui.QIcon()
        icon1.addPixmap(QtGui.QPixmap(_fromUtf8(root+"/res/exp/visuallize.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.pushButton_2.setIcon(icon1)
        self.pushButton_2.setIconSize(QtCore.QSize(141,141))
        icon2 = QtGui.QIcon()
        icon2.addPixmap(QtGui.QPixmap(_fromUtf8(root+"/res/exp/accuracy.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.pushButton_3.setIcon(icon2)
        self.pushButton_3.setIconSize(QtCore.QSize(141,141))
        icon3 = QtGui.QIcon()
        icon3.addPixmap(QtGui.QPixmap(_fromUtf8(root+"/res/exp/accuracy.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.pushButton_4.setIcon(icon3)
        self.pushButton_4.setIconSize(QtCore.QSize(141,141))
        icon4 = QtGui.QIcon()
        icon4.addPixmap(QtGui.QPixmap(_fromUtf8(root+"/res/exp/accuracy.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.pushButton_5.setIcon(icon4)
        self.pushButton_5.setIconSize(QtCore.QSize(141,141))
    def clickSlot(self):
        """Swap the first tile's icon to the visuallize image."""
        icon1 = QtGui.QIcon()
        icon1.addPixmap(QtGui.QPixmap(_fromUtf8(root+"/src/train/images/visuallize.jpg")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.pushButton.setIcon(icon1)
        self.pushButton.setIconSize(QtCore.QSize(141,141))
if __name__ == "__main__":
    import sys
    app = QtGui.QApplication(sys.argv)
    Form = QtGui.QWidget()
    ui = Ui_Form()
    # NOTE(review): Ui_Form.__init__ already calls setupUi(self); this
    # second call rebuilds the widgets onto Form — confirm it is intended.
    ui.setupUi(Form)
    Form.show()
    sys.exit(app.exec_())
| [
"ravika@gmail.com"
] | ravika@gmail.com |
a18d2854e9b097c3be8c7134d21f2cde9d04db3a | 7aa33a8a8d5360523bf2f6a2ce73f93fd5e63d23 | /robotics/Controll.py | e51768b510a2c08a7bac2113c5a90f9ab486318c | [] | no_license | iamMHZ/image-processing-with-opencv | 33b6fac0d50649c99fe35f078af8a38d53358447 | 7412f182ad564905bf24c8fa30f0492b7eb01bd1 | refs/heads/master | 2021-03-17T16:31:23.640213 | 2020-05-09T10:22:04 | 2020-05-09T10:22:04 | 247,002,943 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | import inputs
print(inputs.devices.gamepads)
# Poll the gamepad forever, dumping every event; get_gamepad() blocks
# until at least one event arrives.
while True:
    events = inputs.get_gamepad()
    for event in events:
        print(event.ev_type, event.code, event.state)
| [
"iammhz77@gmail.com"
] | iammhz77@gmail.com |
40647bde765a91a69ab9bf788cf3b28a4ec6715a | e811662c890217c77b60aa2e1295dd0f5b2d4591 | /src/problem_763.py | 33eb4a1f687002f6082644d2dd08682d2f076cda | [] | no_license | rewonderful/MLC | 95357f892f8cf76453178875bac99316c7583f84 | 7012572eb192c29327ede821c271ca082316ff2b | refs/heads/master | 2022-05-08T05:24:06.929245 | 2019-09-24T10:35:22 | 2019-09-24T10:35:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 968 | py | #!/usr/bin/env python
# _*_ coding:utf-8 _*_
def partitionLabels(S):
    """Partition S into as many parts as possible so that each letter
    appears in at most one part; return the list of part sizes.

    Greedy two-pointer sweep: record every character's final occurrence,
    then grow the current window until the scan index reaches the
    furthest final occurrence seen so far — at that point the window is
    self-contained, so close it and record its length.
    """
    final_index = {ch: idx for idx, ch in enumerate(S)}
    sizes = []
    left = right = 0
    for idx, ch in enumerate(S):
        if final_index[ch] > right:
            right = final_index[ch]
        if idx == right:
            sizes.append(right - left + 1)
            left = right + 1
    return sizes
"457261336@qq.com"
] | 457261336@qq.com |
6aecf7de4273913f02af82ef752225319d622d37 | ddf002d1084d5c63842a6f42471f890a449966ee | /basics/Python/PYTHON --------/Loops/for_perfect_number.py | 12c1710e98af7953b5053badaf4ec9ed6496e5f7 | [] | no_license | RaghavJindal2000/Python | 0ab3f198cbc5559bdf46ac259c7136356f7f09aa | 8e5c646585cff28ba3ad9bd6c384bcb5537d671a | refs/heads/master | 2023-01-01T23:56:02.073029 | 2020-10-18T19:30:01 | 2020-10-18T19:30:01 | 263,262,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | num=int(input("Enter the Number : "))
# A perfect number equals the sum of its proper divisors (e.g. 6 = 1+2+3).
# Renamed the accumulator from `sum`, which shadowed the builtin.
divisor_sum = 0
for i in range(1, int(num / 2) + 1):
    if num % i == 0:
        divisor_sum = divisor_sum + i
if divisor_sum == num:
    print("Perfect Number")
else:
    print("Not Perfect Number")
input()  # keep the console window open until the user presses Enter
"40332753+RaghavJindal2000@users.noreply.github.com"
] | 40332753+RaghavJindal2000@users.noreply.github.com |
a77a33ec7d947da341e4206109d82d8d7f44e697 | 11aaeaeb55d587a950456fd1480063e1aed1d9e5 | /.history/test_20190626133340.py | 12865a307e437ef3704eed2ac3124c68bd758365 | [] | no_license | Gr4cchus/Learn-Python-3-The-Hard-Way | 8ce9e68f6a91ea33ea45fe64bfff82d65422c4a8 | f5fa34db16cdd6377faa7fcf45c70f94bb4aec0d | refs/heads/master | 2020-05-17T23:18:29.483160 | 2019-06-26T18:42:52 | 2019-06-26T18:42:52 | 184,023,439 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,413 | py | import random
# # class Foo:
# # answer = 42
# # f1 = Foo()
# # f2 = Foo()
# # print(f1.answer)
# # print(f2.answer)
# # # both will print 42
# # f1.answer = 84
# # Foo.answer = 21
# # print(f1.answer) # 84
# # print(f2.answer) # 21
# class Foo:
# def __init__(self):
# self.answer = 42
# f1 = Foo()
# f2 = Foo()
# # f2.answer = 4000
# Foo.answer = 21
# # f1.answer = 2000
# print(f1.answer)
# print(f2.answer)
# # both will print 42 still
class Scenes(object):
# def __init__(self):
# # self.starting_room = starting_room
# # self.locations = {
# # 'room1': Room1(),
# # 'room2': Room2()
# # }
map_list = [
'room1',
'room2',
'finish'
]
def start(self):
print("You are at the start")
print("Where would you like to go")
self.locations()
def room1(self):
print("You enter room 1")
print("Where would you like to go")
self.locations()
def room2(self):
print("You enter room 2")
print("Where would you like to go")
self.locations()
def finish(self):
print("You have finished")
exit(0)
def locations(self):
print("def locations:", self.map_list)
for i in self.map_list:
print(i)
cmd = {
'room1': room1,
'room2': room2,
}
def guessing_game(self):
n = random.randint(1,4)
print("Oh no a mini-game.")
print("Guess the number between 1-4. To pass")
answer = 0
while answer =! n:
answer = input("> ")
print("wrong guess again!")
if answer == n:
print("Success")
# class Map(Scenes):
# a = Scenes()
# map_dict = {
# 'room1': a.room1(),
# 'room2': a.room2(),
# }
# class Engine():
# def __init__(self, map):
# self.map = map
# def play(self):
# while True:
# # a = self.map.dict_locations
# print('yes')
thescenes = Scenes()
# thelocations = Locations()
# thedict = thelocations.map()
# while True:
# print("loop")
# thelocations.map.dict_locations.get('room1')
thescenes.start()
while True:
action = input("> ")
if action in thescenes.map_list:
print("success")
thescenes.map_list[action](thescenes)
| [
"ahivent@gmail.com"
] | ahivent@gmail.com |
1f79efdb1f12760d507a1294acfc682189e2cc4f | 200abee8ebb5fa255e594c8d901c8c68eb9c1a9c | /venv/01_Stepik/Python_Osnovi_i_primenenie/2.3_2.py | 50544368335316c290b184d30ded2008229713e4 | [] | no_license | Vestenar/PythonProjects | f083cbc07df57ea7a560c6b18efed2bb0dc42efb | f8fdf9faff013165f8d835b0ccb807f8bef6dac4 | refs/heads/master | 2021-07-20T14:14:15.739074 | 2019-03-12T18:05:38 | 2019-03-12T18:05:38 | 163,770,129 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | import itertools
def primes():
num = 2
while True:
if is_prime(num):
yield num
num += 1
def is_prime(num):
if num == 2: return True
if num % 2 == 0: return False
for _ in range(3, num // 2, 2):
if num % _ == 0:
return False
return True
print(list(itertools.takewhile(lambda x : x <= 31, primes()))) | [
"vestenar@gmail.com"
] | vestenar@gmail.com |
e3740376355a7ad6d32d7fb3097ea9e1f04a6db2 | 4df3712caff818c0554e7fbe4b97dee5fcfd8675 | /common/sendMail.py | e8175fd9a7b1c03e70da7d866819a40cdff5ba85 | [] | no_license | Qingyaya/interface | 456057a740bd77ba6c38eda27dd1aef658e0add9 | 3ae37816f52ad8c45e192596a854848d8e546b14 | refs/heads/master | 2020-03-22T07:16:04.171904 | 2018-12-05T05:20:25 | 2018-12-05T05:20:25 | 139,690,021 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,523 | py |
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import smtplib
from common.ReadConfig import ReadConfig
from common.Log import Log
log=Log()
rc=ReadConfig()
def send_mail(report_file):
sender=rc.get_email('sender')
psw=rc.get_email('psw')
receiver=rc.get_email('receiver')
smtpserver=rc.get_email('smtp_server')
port=rc.get_email('port')
with open(report_file,'rb') as f:
mailbody = f.read()
# 定义邮件内容
msg = MIMEMultipart()
body = MIMEText(mailbody, _subtype='html', _charset='utf-8')
msg['Subject'] = u'自动化测试报告'
msg['from'] = sender
msg['To'] = ', '.join(eval(receiver))
msg.attach(body)
# 添加附件
att = MIMEText(open(report_file, 'rb').read(), 'base64', 'utf-8')
att['Content-Type'] = 'application/octet-stream'
att['Content-Disposition'] = 'attachment; filename = "TestReport.html"'
msg.attach(att)
try:
smtp = smtplib.SMTP_SSL(smtpserver, port)
except:
smtp = smtplib.SMTP()
smtp.connect(smtpserver, port)
# 用户名密码
try:
smtp.login(sender, psw)
smtp.sendmail(sender, eval(receiver), msg.as_string())
log.info('Send mail Success!!! test report email has send out!')
except Exception as e:
log.error('Send Mail Failed !!! error: %s' %e)
smtp.quit()
if __name__ == '__main__':
report_file='E:\\IDScloud_ui_demo\\report\\20180517\\20180517100220.html'
send_mail(report_file)
| [
"dongchunyi@idscloud.cn"
] | dongchunyi@idscloud.cn |
3035e52b9cc917ae6870cd17760f97e41ca9995c | b5a9d42f7ea5e26cd82b3be2b26c324d5da79ba1 | /tensorflow/python/keras/applications/mobilenet.py | 97c6b85882e6ea224b0201820317c92823c32ddd | [
"Apache-2.0"
] | permissive | uve/tensorflow | e48cb29f39ed24ee27e81afd1687960682e1fbef | e08079463bf43e5963acc41da1f57e95603f8080 | refs/heads/master | 2020-11-29T11:30:40.391232 | 2020-01-11T13:43:10 | 2020-01-11T13:43:10 | 230,088,347 | 0 | 0 | Apache-2.0 | 2019-12-25T10:49:15 | 2019-12-25T10:49:14 | null | UTF-8 | Python | false | false | 1,662 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""MobileNet v1 models for Keras.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras_applications import mobilenet
from tensorflow.python.keras.applications import keras_modules_injection
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.applications.mobilenet.MobileNet',
'keras.applications.MobileNet')
@keras_modules_injection
def MobileNet(*args, **kwargs):
return mobilenet.MobileNet(*args, **kwargs)
@keras_export('keras.applications.mobilenet.decode_predictions')
@keras_modules_injection
def decode_predictions(*args, **kwargs):
return mobilenet.decode_predictions(*args, **kwargs)
@keras_export('keras.applications.mobilenet.preprocess_input')
@keras_modules_injection
def preprocess_input(*args, **kwargs):
return mobilenet.preprocess_input(*args, **kwargs)
| [
"v-grniki@microsoft.com"
] | v-grniki@microsoft.com |
b516fc14e72cd98ba60397e18718e0b2b396a2e6 | b43cee0973a455a58b74233d4e02d522587f93ae | /skillbox/basic/module22/war_peace.py | 1871dfaea75c70bd1b4c14d260a3e1c153729316 | [] | no_license | ivadimn/py-input | 5861cc92758378f44433bd6b1af7ba78da04d1c0 | bbfdd74c4dffe66440490d79082de2c0318e5027 | refs/heads/master | 2023-08-15T03:34:01.916026 | 2023-07-24T14:48:08 | 2023-07-24T14:48:08 | 202,401,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,134 | py | import zipfile
import string
def frequency(text: str) -> dict:
len_text = len(text)
symbols = set(text)
freq = {ch: text.count(ch) / len_text for ch in sorted(symbols)}
return {key : freq[key] for key in sorted(freq, key=freq.get)}
exclude = string.punctuation + " 0123456789" + \
"\u3002\u201e\u201c\u201f\u201d\u301e\u301f\u00ab\u00bb\u00b7\u00a8" + \
"\u2116\u00a0\u00b0\u00ac\u0227\u2007\u2026\u2012\u2013\u2014\n\r\t"
table = "".maketrans("", "", exclude)
zip_file = zipfile.ZipFile("voyna-i-mir.zip")
print(zip_file.namelist())
text = ""
for fileName in zip_file.namelist():
bytes = zip_file.read(fileName)
content = bytes.decode("UTF-8")
content = content.translate(table)
text = text.join(content)
zip_file.close()
freq_table = frequency(text)
freq_file = open("wp_analysis.txt", "w")
print("\nСдержимое файла wp_analysis.txt: \n")
for k, v in freq_table.items():
if k.isalpha():
line = "{0} {1}\n".format(k, v)
else:
line = "{0} {1}\n".format(ord(k), v)
print(line, end = "")
freq_file.write(line)
print()
freq_file.close() | [
"ivadimn@mail.ru"
] | ivadimn@mail.ru |
d674d9782d314530754af4814fa59a5ad03c66f8 | 630681b5a80acdad9b5597449559ecf89e917aa0 | /env/bin/cftp | e46f526ce10262450281bfedc3754cf60aefe6d6 | [] | no_license | stuartses/trivia | ed5cd090fe7143159c8ed669edd5540de5f9f0f4 | 203b9ff4b3834d4f4a58c23f573187d0f960a64c | refs/heads/master | 2022-12-17T15:57:09.735439 | 2020-09-20T16:32:24 | 2020-09-20T16:32:24 | 296,960,114 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | #!/home/stuartes/repositories/chat/env/bin/python3
# EASY-INSTALL-ENTRY-SCRIPT: 'Twisted==20.3.0','console_scripts','cftp'
__requires__ = 'Twisted==20.3.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('Twisted==20.3.0', 'console_scripts', 'cftp')()
)
| [
"stuart.ses@hotmail.com"
] | stuart.ses@hotmail.com | |
b6357591f310a910f5139ea6c1aafba52ff7d277 | d9d0d3a82d5ba4016097491c276409b9b1ea778a | /Kattis/relocation.py | 99a38f2674f48771aab4f6fdf4bd413803ff3370 | [
"MIT"
] | permissive | ruidazeng/online-judge | 311b9f2c3120b47da91da2d404e2ea1d9a2a24dd | 6bdf8bbf1af885637dab474d0ccb58aff22a0933 | refs/heads/master | 2022-02-16T00:35:11.852600 | 2022-01-26T02:28:53 | 2022-01-26T02:28:53 | 191,827,952 | 0 | 1 | MIT | 2019-07-31T10:25:36 | 2019-06-13T20:21:18 | Python | UTF-8 | Python | false | false | 273 | py | _, Q = map(int, input().split())
companies = [int(x) for x in input().split()]
for _ in range(Q):
indicator, x, y = map(int, input().split())
if indicator == 1:
companies[x-1] = y
elif indicator == 2:
print(abs(companies[x-1] - companies[y-1])) | [
"ruida.zeng@vanderbilt.edu"
] | ruida.zeng@vanderbilt.edu |
94a882b3ad4cf2c8ce3f7d515284b7b95e0bbeda | 06ba98f4e71e2e6e04e9e381987333a743511818 | /history/migrations/0002_auto_20180803_0007.py | 80603c86d738101b7f32f908e7b49fa21ff1e7da | [] | no_license | AnEvilHerbivore/Django-Music | e99c6f7936088a3baa42abeaea4b46361fb415cb | 8f0b45d22053ca674f4dc8f963cb0da949469213 | refs/heads/master | 2022-12-10T10:08:35.831550 | 2018-08-03T19:12:42 | 2018-08-03T19:12:42 | 141,728,372 | 0 | 0 | null | 2021-06-10T20:43:27 | 2018-07-20T15:24:59 | Python | UTF-8 | Python | false | false | 1,053 | py | # Generated by Django 2.0.1 on 2018-08-03 00:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('history', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='artist',
name='biggest_hit',
field=models.CharField(default='', max_length=100),
),
migrations.AlterField(
model_name='artist',
name='birth_date',
field=models.CharField(default='', max_length=100),
),
migrations.AlterField(
model_name='artist',
name='name',
field=models.CharField(default='', max_length=100),
),
migrations.AlterField(
model_name='song',
name='album',
field=models.CharField(default='', max_length=100),
),
migrations.AlterField(
model_name='song',
name='title',
field=models.CharField(default='', max_length=100),
),
]
| [
"joeshepmedia@gmail.com"
] | joeshepmedia@gmail.com |
098396a34b3be24ef43d0b1428dcb079fb5c911a | c31ee8136a57a96649196081e1cfde0676c2a481 | /larcv/app/tests/test_matrixmult.py | 63a26af9a4138e0c87251de2c5104d965db970cd | [
"MIT"
] | permissive | DeepLearnPhysics/larcv2 | b12b46168e5c6795c70461c9495e29b427cd88b5 | 31863c9b094a09db2a0286cfbb63ccd2f161e14d | refs/heads/develop | 2023-06-11T03:15:51.679864 | 2023-05-30T17:51:19 | 2023-05-30T17:51:19 | 107,551,725 | 16 | 19 | MIT | 2023-04-10T10:15:13 | 2017-10-19T13:42:39 | C++ | UTF-8 | Python | false | false | 1,073 | py | import os,sys
import ROOT
import numpy as np
from larcv import larcv
print larcv.Image2D
# TESTS MATRIX MULTIPLICATION FEATURE
a = np.random.rand(6,5)
b = np.random.rand(5,8)
aI = larcv.Image2D( a.shape[0], a.shape[1] )
bI = larcv.Image2D( b.shape[0], b.shape[1] )
arows = a.shape[0]
acols = a.shape[1]
brows = b.shape[0]
bcols = b.shape[1]
for r in range(0,arows):
for c in range(0,acols):
aI.set_pixel( r, c, a[r,c] )
for r in range(0,brows):
for c in range(0,bcols):
bI.set_pixel( r, c, b[r,c] )
C = np.dot(a,b)
CI = aI*bI
crows = CI.meta().rows()
ccols = CI.meta().cols()
print "A diff"
Adiff = np.zeros( a.shape )
for r in range(0,arows):
for c in range(0,acols):
Adiff[r,c] = aI.pixel(r,c)-a[r,c]
print Adiff
print "B diff"
Bdiff = np.zeros( b.shape )
for r in range(0,brows):
for c in range(0,bcols):
Bdiff[r,c] = bI.pixel(r,c)-b[r,c]
print Bdiff
print "CDiff"
Cdiff = np.zeros( C.shape )
for r in range(0,crows):
for c in range(0,ccols):
Cdiff[r,c] = CI.pixel(r,c)-C[r,c]
print Cdiff
| [
"kazuhiro@nevis.columbia.edu"
] | kazuhiro@nevis.columbia.edu |
078153fca42249d9d1fb37d3cd7526a82fef59bc | fa2ab3d980aeff387edc556121b124fd68078789 | /ConditionalPrograms/ShippingAccount.py | af9ed57cdbf6a844dd86373f191d63a1bd4db288 | [
"MIT"
] | permissive | MiguelCF06/PythonProjects | 6e0a3323d3a44a893ec0afafcba7ec3882e62aa3 | dfa49203c3ed1081728c7f4e565f847629662d75 | refs/heads/master | 2022-10-17T23:22:04.357296 | 2020-06-10T18:03:38 | 2020-06-10T18:03:38 | 265,905,262 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,571 | py | print("Welcome to the Shipping Accounts Program\n")
username = ["mikeL", "Omar293", "JJlk", "JoelW"]
user = input("Hello, what is your username: ")
if user not in username:
print("Sorry, you do not have an account with us. Goodbye.")
else:
print("Hello {}. Welcome back to your account.".format(user))
print("Current shipping prices are as follows:\n")
print("Shipping orders 0 to 100:\t\t$5.10 each")
print("Shipping orders 100 to 500:\t\t$5.00 each")
print("Shipping orders 500 to 1000:\t$4.95 each")
print("Shipping orders over 1000:\t\t$4.80 each\n")
amount = int(input("How many items would you like to ship: "))
if amount <= 0:
print("Nothing to do.")
if amount > 0 and amount <= 100:
items = 5.10
price = items * amount
print("To ship {} items it will cost you ${} at $5.10 per item.".format(amount, price))
elif amount > 100 and amount <= 500:
items = 5.00
price = items * amount
print("To ship {} items it will cost you ${} at $5.00 per item.".format(amount, price))
elif amount > 500 and amount <= 1000:
items = 4.95
price = items * amount
print("To ship {} items it will cost you ${} at $4.95 per item.".format(amount, price))
else:
items = 4.80
price = items * amount
print("To ship {} items it will cost you ${} at $4.80 per item.".format(amount, price))
print()
answer = input("Would you like to place this order (y/n): ")
if answer == "n" or answer == "N":
print("Okay, no order is being placed at this time.")
elif answer == "y" or answer == "Y":
print("Okay. Shipping your {} items.".format(amount)) | [
"miguel.cipamocha@gmail.com"
] | miguel.cipamocha@gmail.com |
a3739687fd238c1cd2484eca5cf46e5c9c27e987 | de15d27440ceb922a8d12f8db5881ae1982592ec | /sampledb/models/migrations/publications_add_object_name.py | 3b07967f3ffd3718788e6af1f4c7eb96f5ccb804 | [
"MIT"
] | permissive | maltedeckers/sampledb | 24f39f1adbe9bcc341309a4b6620768a8dc3857c | 30ad29f8df01290d4ff84a9b347f15a10856ac22 | refs/heads/master | 2023-08-22T04:25:47.826698 | 2021-05-07T09:07:02 | 2021-05-07T09:07:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 615 | py | # coding: utf-8
"""
Add object_name column to object_publications table.
"""
import os
MIGRATION_INDEX = 27
MIGRATION_NAME, _ = os.path.splitext(os.path.basename(__file__))
def run(db):
# Skip migration by condition
column_names = db.session.execute("""
SELECT column_name
FROM information_schema.columns
WHERE table_name = 'object_publications'
""").fetchall()
if ('object_name',) in column_names:
return False
# Perform migration
db.session.execute("""
ALTER TABLE object_publications
ADD object_name TEXT NULL
""")
return True
| [
"f.rhiem@fz-juelich.de"
] | f.rhiem@fz-juelich.de |
b34775b5a3efbd0dda72ca1c924c1daa49d5995a | ac23f0e5bb60c3201ea16d92369f8defa50f574a | /0x0B-python-input_output/4-append_write.py | 6d1d834297f4e5478a9ff2f4ab4921ad9f4a8ea5 | [] | no_license | Nukemenonai/holbertonschool-higher_level_programming | 85ba3e61517ee48a2e73980c915e7033e8090f06 | 3c467bb8ab3fa38454709ed7eb9819e0eb445310 | refs/heads/master | 2020-09-29T00:21:47.583303 | 2020-08-30T22:40:59 | 2020-08-30T22:40:59 | 226,901,103 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py | #!/usr/bin/python3
def append_write(filename="", text=""):
""" writes a string UTF8 to a text file
returns the number of characters written
filename: name of the file.
text: the text to insert
appends
"""
with open(filename, 'a') as f:
n = f.write(text)
f.close()
return n
| [
"david.giovanni.ovalle@gmail.com"
] | david.giovanni.ovalle@gmail.com |
24c072a5dea3b8bd6c343321376a8de0b7705640 | a6ffe7990cb5690a20566f64e343441e79d4d11a | /leetcode/10. 正则表达式匹配.py | 213fb81272bf7ae50cc592b1ef1bb296b8415fac | [] | no_license | ywcmaike/OJ_Implement_Python | 26b907da4aece49d3833382f80665a6263cbf0ec | 48e99509e675a6708a95a40912f0f0f022a08d73 | refs/heads/master | 2022-11-26T17:35:22.066443 | 2020-08-02T16:19:25 | 2020-08-02T16:19:25 | 72,869,628 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | #!/usr/bin/env python
#-*- coding:utf-8 -*-
# author:maike
# datetime:2020/7/22 下午6:44
import sys
if __name__ == "__main__":
# 读取第一行的n
n = int(sys.stdin.readline().strip())
ans = 0
for i in range(n):
# 读取每一行
line = sys.stdin.readline().strip()
# 把每一行的数字分隔后转化成int列表
values = list(map(int, line.split()))
for v in values:
ans += v
print(ans)
if __name__ == '__main__':
| [
"2755289083@qq.com"
] | 2755289083@qq.com |
441d888c4903420479c5f874867acad5a6233fe8 | 5cf3f04bdee5a17d7e4b7e14294047ce3d1dc40a | /guess_dice/middleware/ipAddress.py | b57a807ddfe48e91f76edc72a7e66852d8f71596 | [] | no_license | gzgdouru/guess_dice_site | bc2e4b284d5c0399232247ecc7634341199b5ad7 | 03bfadef8412a8d1d7506c1bfb5e58aee68ba343 | refs/heads/master | 2020-04-06T12:45:09.757664 | 2018-12-29T14:15:41 | 2018-12-29T14:15:41 | 157,469,262 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 466 | py | from django.utils.deprecation import MiddlewareMixin
from analysis.models import ClientIp
class MarkVisitIpMiddleware(MiddlewareMixin):
def process_request(self, request):
try:
realIp = request.META["HTTP_X_FORWARDED_FOR"]
realIp = realIp.split(",")[0]
except:
realIp = request.META["REMOTE_ADDR"]
url = request.path
if realIp != "127.0.0.1":
ClientIp(ip=realIp, url=url).save()
| [
"18719091650@163.com"
] | 18719091650@163.com |
7fe0b97d863104f488ad653d559526403da60608 | f090c3e0faa70cf0ef7c4be99cb894630bce2842 | /scripts/dataAnalysis/EnergyTransport/2013Aug04/individual_fits/function of heating time/fitter_script_dsplaced_2212_50_ion2.py | 76cd7f8d1e7e158da7d9e4d47a24dc31d87797e8 | [] | no_license | HaeffnerLab/resonator | 157d1dc455209da9b7de077157bda53b4883c8b7 | 7c2e377fdc45f6c1ad205f8bbc2e6607eb3fdc71 | refs/heads/master | 2021-01-09T20:48:03.587634 | 2016-09-22T18:40:17 | 2016-09-22T18:40:17 | 6,715,345 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,175 | py | import lmfit
import labrad
from labrad import types as T
from lamb_dicke import lamb_dicke
from rabi_flop_fitter import rabi_flop_time_evolution
import numpy as np
from matplotlib import pyplot
'''
script parameters
'''
info = ('Carrier Flops', ('2013Sep04','2212_50'))
ion_selection = 2
trap_frequency = T.Value(3.0, 'MHz')
projection_angle = 45 #degrees
offset_time = 0.0
sideband_order = -1
fitting_region = (0, 40) #microseconds
'''
compute lamb dicke parameter
'''
eta = lamb_dicke.lamb_dicke(trap_frequency, projection_angle)
print 'Lamb Dicke parameter: {0:.2f}'.format(eta)
'''
initialize the fitter
'''
flop = rabi_flop_time_evolution(sideband_order, eta)
'''
create fitting parameters
'''
params = lmfit.Parameters()
params.add('excitation_scaling', value = 1.0, vary = False)
params.add('detuning', value = 0, vary = 0) #units of rabi frequency
params.add('time_2pi', value = 1.532954, vary = 0) #microseconds
params.add('nbar', value = 3.699035, min = 0.0, max = 200.0, vary= 0)
params.add('alpha', value = 1.0, min = 0.0, max = 200.0, vary = 1)
'''
load the dataset
'''
dv = labrad.connect().data_vault
title,dataset = info
date,datasetName = dataset
dv.cd( ['','Experiments','Blue Heat RabiFlopping',date,datasetName] )
dv.open(1)
times,prob = dv.get().asarray.transpose()[[0, 1 + ion_selection],:]
print 'heat duration', dict(dv.get_parameters())['Heating.blue_heating_duration']
tmin,tmax = times.min(), times.max()
detailed_times = np.linspace(tmin, tmax, 1000)
'''
compute time evolution of the guessed parameters
'''
guess_evolution = flop.compute_evolution_coherent(params['nbar'].value , params['alpha'].value, params['detuning'].value, params['time_2pi'].value, detailed_times - offset_time, excitation_scaling = params['excitation_scaling'].value)
'''
define how to compare data to the function
'''
def rabi_flop_fit_thermal(params , t, data):
model = flop.compute_evolution_coherent(params['nbar'].value , params['alpha'].value, params['detuning'].value, params['time_2pi'].value, t - offset_time, excitation_scaling = params['excitation_scaling'].value)
return model - data
'''
perform the fit
'''
region = (fitting_region[0] <= times) * (times <= fitting_region[1])
result = lmfit.minimize(rabi_flop_fit_thermal, params, args = (times[region], prob[region]))
fit_values = flop.compute_evolution_coherent(params['nbar'].value , params['alpha'].value, params['detuning'].value, params['time_2pi'].value, detailed_times - offset_time, excitation_scaling = params['excitation_scaling'].value)
lmfit.report_errors(params)
'''
make the plot
'''
pyplot.figure()
pyplot.plot(detailed_times, guess_evolution, '--k', alpha = 0.5, label = 'initial guess')
pyplot.plot(times, prob, 'ob', label = 'data')
pyplot.plot(detailed_times, fit_values, 'r', label = 'fitted')
pyplot.legend()
pyplot.title(title)
pyplot.xlabel('time (us)')
pyplot.ylabel('D state occupation probability')
pyplot.text(max(times)*0.70,0.68, 'detuning = {0}'.format(params['detuning'].value))
pyplot.text(max(times)*0.70,0.73, 'nbar = {:.0f}'.format(params['nbar'].value))
pyplot.text(max(times)*0.70,0.78, '2 Pi Time = {:.1f} us'.format(params['time_2pi'].value))
pyplot.show() | [
"soenkeamoeller@gmail.com"
] | soenkeamoeller@gmail.com |
0c5dad8fd3938d30a3086f85c582ec0892a2191f | 3f46af2da32d9f02d1ebbdef6784ece1d64aace3 | /Production/python/PrivateSamples/EMJ_2016_mMed-1600_mDark-20_ctau-225_unflavored-down_cff.py | 108dad647638d680e89dd21aef1f1b6a9bff01af | [] | no_license | cms-svj/TreeMaker | 53bf4b1e35d2e2a4fa99c13c2c8b60a207676b6d | 0ded877bcac801a2a394ad90ed987a20caa72a4c | refs/heads/Run2_2017 | 2023-07-19T07:14:39.175712 | 2020-10-06T21:10:26 | 2020-10-06T21:10:26 | 305,753,513 | 0 | 0 | null | 2021-01-26T18:58:54 | 2020-10-20T15:32:19 | null | UTF-8 | Python | false | false | 1,892 | py | import FWCore.ParameterSet.Config as cms
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)
readFiles.extend( [
'gsiftp://hepcms-gridftp.umd.edu//mnt/hadoop/cms/store/group/EMJRunII/2016/step4_MINIAOD_mMed-1600_mDark-20_ctau-225_unflavored-down_n-500_part-1.root',
'gsiftp://hepcms-gridftp.umd.edu//mnt/hadoop/cms/store/group/EMJRunII/2016/step4_MINIAOD_mMed-1600_mDark-20_ctau-225_unflavored-down_n-500_part-2.root',
'gsiftp://hepcms-gridftp.umd.edu//mnt/hadoop/cms/store/group/EMJRunII/2016/step4_MINIAOD_mMed-1600_mDark-20_ctau-225_unflavored-down_n-500_part-3.root',
'gsiftp://hepcms-gridftp.umd.edu//mnt/hadoop/cms/store/group/EMJRunII/2016/step4_MINIAOD_mMed-1600_mDark-20_ctau-225_unflavored-down_n-500_part-4.root',
'gsiftp://hepcms-gridftp.umd.edu//mnt/hadoop/cms/store/group/EMJRunII/2016/step4_MINIAOD_mMed-1600_mDark-20_ctau-225_unflavored-down_n-500_part-5.root',
'gsiftp://hepcms-gridftp.umd.edu//mnt/hadoop/cms/store/group/EMJRunII/2016/step4_MINIAOD_mMed-1600_mDark-20_ctau-225_unflavored-down_n-500_part-6.root',
'gsiftp://hepcms-gridftp.umd.edu//mnt/hadoop/cms/store/group/EMJRunII/2016/step4_MINIAOD_mMed-1600_mDark-20_ctau-225_unflavored-down_n-500_part-7.root',
'gsiftp://hepcms-gridftp.umd.edu//mnt/hadoop/cms/store/group/EMJRunII/2016/step4_MINIAOD_mMed-1600_mDark-20_ctau-225_unflavored-down_n-500_part-8.root',
'gsiftp://hepcms-gridftp.umd.edu//mnt/hadoop/cms/store/group/EMJRunII/2016/step4_MINIAOD_mMed-1600_mDark-20_ctau-225_unflavored-down_n-500_part-9.root',
'gsiftp://hepcms-gridftp.umd.edu//mnt/hadoop/cms/store/group/EMJRunII/2016/step4_MINIAOD_mMed-1600_mDark-20_ctau-225_unflavored-down_n-500_part-10.root',
] )
| [
"enochnotsocool@gmail.com"
] | enochnotsocool@gmail.com |
32d00cbdf934957158d5c286facfeab2e5d2170f | af632a0d727cd350a3c95360bb1bb8a411051da7 | /mysite/reading/migrations/0005_auto__add_field_text_synopsis.py | 92b689114d31eff115b9e7fc5a753e368632936b | [] | no_license | rybesh/mysite | f760fec83f1b552abd62010cff4ada4c6fda66b0 | c091284d802ef719d7535d9c8790f4c6e458f905 | refs/heads/master | 2016-09-05T18:01:31.200290 | 2014-07-23T15:36:09 | 2014-07-23T15:36:09 | 1,242,540 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,481 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Text.synopsis'
db.add_column('reading_text', 'synopsis', self.gf('django.db.models.fields.TextField')(default=''), keep_default=False)
def backwards(self, orm):
# Deleting field 'Text.synopsis'
db.delete_column('reading_text', 'synopsis')
models = {
'reading.note': {
'Meta': {'object_name': 'Note'},
'created': ('django.db.models.fields.DateTimeField', [], {'unique': 'True', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'markdown': ('django.db.models.fields.TextField', [], {}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'text': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notes'", 'to': "orm['reading.Text']"})
},
'reading.text': {
'Meta': {'object_name': 'Text'},
'bibtex': ('django.db.models.fields.TextField', [], {}),
'citation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'markdown': ('django.db.models.fields.TextField', [], {}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'related_texts': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_texts_rel_+'", 'to': "orm['reading.Text']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '80', 'db_index': 'True'}),
'small_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'synopsis': ('django.db.models.fields.TextField', [], {})
}
}
complete_apps = ['reading']
| [
"ryanshaw@unc.edu"
] | ryanshaw@unc.edu |
fd5cb1e3cc6d7bf3bc992db71056e2364fb1b3ab | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_6845.py | 25671db34c9e0643e32318d55e46d052ec86f703 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | py | # Efficient way to format string
"select %(tableName)s.somefield, count(*) from %(tableName)s WHERE %(tableName)s.TimeStamp > %(fromDate)s and %(tableName)s.EndTimeStamp < %(to_data)s group by %(tableName)s.ProviderUsername;" %{'tableName':tableName, 'fromDate':fromDate, 'to_data':to_data}
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
76cb32021bcffbb90f7204eb7683d786698f1d8a | cbedb18df0aaac810aeea87a2273edb15c1cf899 | /from Stephen/google list/752. Open the Lock (pass, bfs).py | 03123b3ab45fbe0c24776bf3c08be55ad02d55cd | [] | no_license | kanglicheng/CodeBreakersCode | 71b833bb9f4c96d520c26f0044365dc62137a940 | 31f7f730227a0e10951e7468bad1b995cf2eafcb | refs/heads/master | 2023-08-07T20:32:05.267695 | 2020-09-14T14:36:25 | 2020-09-14T14:36:25 | 265,978,034 | 0 | 0 | null | 2020-05-22T00:05:29 | 2020-05-22T00:05:29 | null | UTF-8 | Python | false | false | 2,272 | py | class Solution:
def openLock(self, deadends: List[str], target: str) -> int:
'''
shortest path -> BFS
'''
def toString(cur):
_str = ""
for v in cur:
_str += str(v)
return _str
def checkDeadEnds(deadendsSet, _str):
if _str in deadendsSet:
return False
return True
def findNextStep(deadendsSet, curLock, visited):
directions = [[1,0,0,0], [-1,0,0,0],
[0,1,0,0], [0,-1,0,0],
[0,0,1,0], [0,0,-1,0],
[0,0,0,1], [0,0,0,-1]]
nextSteps = []
for d in directions:
cur = [curLock[0] + d[0], curLock[1] + d[1], curLock[2] + d[2], curLock[3] + d[3]]
for i in range(0, 4):
if cur[i] == -1:
cur[i] = 9
elif cur[i] == 10:
cur[i] = 0
_str = toString(cur)
if checkDeadEnds(deadendsSet, _str) and _str not in visited:
nextSteps.append(cur)
visited.add(_str)
return nextSteps
deadendsSet = set()
for d in deadends:
deadendsSet.add(d)
lock = [0,0,0,0]
if toString(lock) in deadendsSet:
return -1
q = collections.deque()
q.append(lock)
moves = 0
visited = set()
while len(q) > 0:
curSize = len(q)
for i in range(0, curSize):
cur = q.pop()
if toString(cur) == target:
return moves
nextSteps = findNextStep(deadendsSet, cur, visited)
q.extendleft(nextSteps)
moves += 1
return -1
| [
"56766457+Wei-LiHuang@users.noreply.github.com"
] | 56766457+Wei-LiHuang@users.noreply.github.com |
07aa0556223da2feccd58233234db58c8f18e439 | 35fff80627ad675bec1e429943cb2bbbaf141ca2 | /notebooks/Papers/paper2/packages/lc/base.py | 83a7f05be6d1e91398e9dfda3a61890825c177d8 | [] | no_license | ishrat2003/IS-Goldsmiths | bac3473b7ffde7cebfb952cd78aba510c8d72c6f | afae9525ceb62cd09eb14149ee2b88798c5ceb90 | refs/heads/master | 2020-04-27T09:24:10.399620 | 2019-10-16T21:23:13 | 2019-10-16T21:23:13 | 174,212,961 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,442 | py | import re, sys, numpy
from nltk import word_tokenize, pos_tag
from nltk.stem.porter import PorterStemmer
import utility
from sklearn.cluster import KMeans
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.pylab import rcParams
class Base():
    """Keyword/topic extraction over a single text.

    Tokenizes with NLTK, keeps only selected part-of-speech types, stems
    words and accumulates per-word statistics in ``self.wordInfo``.
    Subclasses are expected to override ``_getX``/``_getY`` (and to fill a
    per-word ``score``) so words can be scored and plotted; high-scoring
    words are marked as "topic" contributors.
    """

    def __init__(self, text, filterRate = 0):
        # Raw input is kept verbatim; self.text holds the cleaned copy.
        self.rawText = text
        self.text = self.__clean(text)
        self.stopWords = utility.Utility.getStopWords()
        self.stemmer = PorterStemmer()
        # stemmed key -> {pure_word, stemmed_word, type, count, index, ...}
        self.wordInfo = {}
        self.featuredWordInfo = {}
        # Nouns only by default; see setAllowedPosTypes for the full list.
        self.allowedPOSTypes = ['NN', 'NNP', 'NNS', 'NNPS']
        # Words shorter than this are discarded.
        self.minWordSize = 2
        self.sentences = []
        # POS tags that terminate a sentence.
        self.punctuationTypes = ['.', '?', '!']
        self.maxCount = 1
        self.maxScore = 0
        self.filterRate = filterRate
        self.topScorePercentage = filterRate
        self.filteredWords = {}
        self.contributors = []
        return

    '''
    allOptions = ['NN', 'NNP', 'NNS', 'NNPS', 'JJ', 'JJR', 'JJS' 'RB', 'RBR', 'RBS', 'VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ']
    '''
    def setAllowedPosTypes(self, allowedPOSTypes):
        # Restrict which part-of-speech tags _addWordInfo will keep.
        self.allowedPOSTypes = allowedPOSTypes
        return

    def setFilterWords(self, filterRate = 0.2):
        # Update the filter rate and immediately rebuild filteredWords.
        self.filterRate = filterRate
        self.loadFilteredWords()
        return

    def setTopScorePercentage(self, topScorePercentage):
        # Fraction of maxScore a word needs to be considered a topic word.
        self.topScorePercentage = topScorePercentage
        return

    def getRawText(self):
        return self.rawText

    def getCleanText(self):
        return self.text

    # NOTE: method name contains a typo ("Contrinutors") but is kept
    # unchanged for backwards compatibility with existing callers.
    def getContrinutors(self):
        return self.contributors

    def getWordInfo(self):
        return self.wordInfo

    def getSentences(self):
        return self.sentences

    def loadFilteredWords(self):
        """Rebuild self.filteredWords keeping words whose count exceeds
        maxCount * filterRate; re-index the survivors."""
        minAllowedScore = self.maxCount * self.filterRate
        self.filteredWords = {}
        for word in self.wordInfo:
            if self.wordInfo[word]['count'] <= minAllowedScore:
                continue
            index = len(self.filteredWords)
            self.filteredWords[word] = self.wordInfo[word]
            self.filteredWords[word]['index'] = index
        print('----------------------')
        print("Total local vocab: ", len(self.wordInfo))
        print("Filtered local vocab: ", len(self.filteredWords))
        return self.filteredWords

    def loadSentences(self, text):
        """POS-tag *text* and populate self.sentences and self.wordInfo.

        A sentence is a list of unique stemmed word keys; the current
        sentence is flushed whenever a sentence-ending punctuation tag is
        seen (single-word sentences are dropped).
        """
        words = self.__getWords(text, True)
        self.wordInfo = {}
        self.sentences = []
        currentSentence = []

        for word in words:
            (word, type) = word
            word = self.__cleanWord(word)

            if type in self.punctuationTypes:
                if len(currentSentence) > 1:
                    # If more than one word than add as sentence
                    self.sentences.append(currentSentence)
                    currentSentence = []

            if len(word) < self.minWordSize:
                continue

            wordKey = self._addWordInfo(word, type)
            if wordKey and (wordKey not in currentSentence):
                currentSentence.append(wordKey)

        # Processing last sentence
        if len(currentSentence) > 1:
            # If more than one word than add as sentence
            self.sentences.append(currentSentence)

        # No filtering at this point: every seen word is kept.
        self.filteredWords = self.wordInfo
        return self.sentences

    def displayPlot(self, fileName):
        """Scatter-plot all filtered words and save the figure to *fileName*."""
        #rcParams['figure.figsize']=15,10
        mpl.rcParams.update({'font.size': 15})
        points = self.getPoints()
        if not points:
            print('No points to display')
            return
        plt.figure(figsize=(20, 20))  # in inches
        for point in points:
            plt.scatter(point['x'], point['y'], c = point['color'])
            plt.annotate(point['label'],
                xy=(point['x'], point['y']),
                xytext=(5, 2),
                textcoords='offset points',
                ha='right',
                va='bottom')
        plt.savefig(fileName)
        print('After saving')
        plt.show()
        return

    def getPoints(self):
        """Return plot point dicts for all filtered words.

        Topic words (score >= maxScore * topScorePercentage) are colored
        red and recorded in self.contributors; returns None when no words
        have been loaded yet.
        """
        if not len(self.wordInfo):
            return None
        topWordScores = self.maxScore * self.topScorePercentage
        points = []

        for word in self.filteredWords:
            point = {}
            point['x'] = self._getX(word)
            point['y'] = self._getY(word)
            point['color'] = 'green'
            point['label'] = self.filteredWords[word]['pure_word']
            point['type'] = self.filteredWords[word]['type']
            if self.isTopic(word, topWordScores):
                point['color'] = 'red'
                self.contributors.append(word)
            points.append(point)
        return points

    def isTopic(self, word, topWordScores):
        # Assumes the word dict carries a 'score' entry (set by subclasses).
        return (self.filteredWords[word]['score'] >= topWordScores)

    def _getX(self, word):
        # Placeholder: subclasses override to position a word on the plot.
        return 0

    def _getY(self, word):
        # Placeholder: subclasses override to position a word on the plot.
        return 0

    def _addWordInfo(self, word, type):
        """Register one (word, POS) occurrence; return the stemmed key.

        Returns None when the POS type is not allowed. Keeps a running
        maximum occurrence count in self.maxCount.
        """
        if not word or (type not in self.allowedPOSTypes):
            return None

        localWordInfo = {}
        localWordInfo['pure_word'] = word
        wordKey = self.stemmer.stem(word.lower())
        localWordInfo['stemmed_word'] = wordKey
        localWordInfo['type'] = type

        if localWordInfo['stemmed_word'] in self.wordInfo.keys():
            # Seen before: only bump the counter.
            self.wordInfo[wordKey]['count'] += 1
            if self.maxCount < self.wordInfo[wordKey]['count']:
                self.maxCount = self.wordInfo[wordKey]['count']
            return wordKey

        localWordInfo['count'] = 1
        localWordInfo['index'] = len(self.wordInfo)
        self.wordInfo[wordKey] = localWordInfo
        return wordKey

    def __getWords(self, text, tagPartsOfSpeach = False):
        # Tokenize; optionally return (token, POS-tag) pairs.
        words = word_tokenize(text)
        if tagPartsOfSpeach:
            return pos_tag(words)
        return words

    def __cleanWord(self, word):
        # Strip everything but letters and digits.
        return re.sub('[^a-zA-Z0-9]+', '', word)

    def __clean(self, text):
        """Normalize raw text: drop markup/entities, collapse whitespace
        and runs of sentence-ending dots."""
        text = re.sub('<.+?>', '. ', text)
        text = re.sub('&.+?;', '', text)
        text = re.sub('[\']{1}', '', text)
        text = re.sub('[^a-zA-Z0-9\s_\-\?:;\.,!\(\)\"]+', ' ', text)
        text = re.sub('\s+', ' ', text)
        text = re.sub('(\.\s*)+', '. ', text)
        return text
| [
"ishrat@thebyte9.com"
] | ishrat@thebyte9.com |
39f48dfeed3f3313c308862c8550119fc3bc1641 | fc43470de13ff8f03105efc2a3660a1ed6a1a553 | /BAEKJOON/2504_괄호의값.py | 4cb3ea2ff67361cd93e8308808eef08938034270 | [] | no_license | youseop/Problem_solutions | 5a05597f188b4ef8f7d8483b46bf05fbf2158d01 | 1fba638d9520bca4354bca01f194f80b159e26aa | refs/heads/master | 2023-06-24T05:12:45.060086 | 2021-07-24T14:22:33 | 2021-07-24T14:22:33 | 298,317,735 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 964 | py | import sys
import sys

read = sys.stdin.readline

# BOJ 2504: value of a bracket sequence.
# '()' is worth 2 and '[]' is worth 3; concatenation adds values,
# nesting multiplies them. An invalid sequence scores 0.
bracket = read().strip()
stack = []


def _close(stack, opener, value):
    """Collapse the stack down to *opener*, replacing the group by its value.

    Integers on top of the stack are partial sums of the group being
    closed; they are summed and the whole group is pushed back as
    ``value`` (empty group) or ``inner * value`` (nested content).
    Returns False when the first opener found does not match, i.e. the
    sequence is invalid. An exhausted stack returns True so the final
    leftover check below reports the result (the empty-stack sum is 0).
    """
    inner = 0
    while stack:
        top = stack.pop()
        if isinstance(top, int):
            inner += top
        elif top == opener:
            stack.append(value if inner == 0 else inner * value)
            return True
        else:
            # Mismatched opener, e.g. '(' closed by ']'.
            return False
    return True


for ch in bracket:
    if ch == '(' or ch == '[':
        stack.append(ch)
    elif ch == ']':
        if not _close(stack, '[', 3):
            print(0)
            exit()
    else:  # ')'
        if not _close(stack, '(', 2):
            print(0)
            exit()

# Any bracket characters left over mean unmatched openers -> invalid.
if any(b in stack for b in ['(', ')', '[', ']']):
    print(0)
else:
    print(sum(stack))
| [
"66366941+youseop@users.noreply.github.com"
] | 66366941+youseop@users.noreply.github.com |
b11c66101b1e09ca12e76d8ce55d6ede96feff43 | 75f28905cc9d87d82be68a37a18beee8d6f21869 | /user_messages/urls.py | 732cdef2d4079ff7476434539d352d3c7f549367 | [] | no_license | Pavlenkovv/Cafe-heroku | f189bbd817a8736b43531bc5d73895fa436a8040 | e0e9b67247a3375e6f599b2dfcd77b1ccce9e5fb | refs/heads/main | 2023-02-27T14:48:24.549261 | 2021-01-29T12:55:19 | 2021-01-29T12:55:19 | 329,387,838 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | from django.urls import path
# Explicit imports instead of the original wildcard ("from .views import *")
# so this module's dependencies on the views are visible and lintable.
from .views import home, update_messages

urlpatterns = [
    # Message list / inbox page.
    path('', home, name='messages_info'),
    # Edit a single message selected by primary key.
    path('update/<int:pk>/', update_messages, name='update'),
]
"pavlenko.vyacheslav@gmail.com"
] | pavlenko.vyacheslav@gmail.com |
6689f6b14bb0bd93a13ac8db5478c886fab76e6a | 2372281d6e08dfc517c60d5a0cce678f15f904db | /experiments/output_perturbation/scikit-learn/examples/preprocessing/plot_discretization_strategies.py | 9ef211a83ccf307e6861b3c49fdf16fd08a4849d | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | amzn/differential-privacy-bayesian-optimization | b647b8281be0c2ea335104c9c92e717ce07ce63f | 07fec631d00bf3381ca24f7d73757aef6dfda9d3 | refs/heads/master | 2023-03-13T04:10:22.753152 | 2022-10-03T19:26:44 | 2022-10-03T19:26:44 | 252,813,133 | 27 | 19 | Apache-2.0 | 2022-10-03T19:26:45 | 2020-04-03T18:45:17 | Python | UTF-8 | Python | false | false | 3,052 | py | # -*- coding: utf-8 -*-
"""
==========================================================
Demonstrating the different strategies of KBinsDiscretizer
==========================================================
This example presents the different strategies implemented in KBinsDiscretizer:
- 'uniform': The discretization is uniform in each feature, which means that
the bin widths are constant in each dimension.
- quantile': The discretization is done on the quantiled values, which means
that each bin has approximately the same number of samples.
- 'kmeans': The discretization is based on the centroids of a KMeans clustering
procedure.
The plot shows the regions where the discretized encoding is constant.
"""
# Author: Tom Dupré la Tour
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import KBinsDiscretizer
from sklearn.datasets import make_blobs
print(__doc__)
strategies = ['uniform', 'quantile', 'kmeans']
n_samples = 200
centers_0 = np.array([[0, 0], [0, 5], [2, 4], [8, 8]])
centers_1 = np.array([[0, 0], [3, 1]])
# construct the datasets
random_state = 42
X_list = [
np.random.RandomState(random_state).uniform(-3, 3, size=(n_samples, 2)),
make_blobs(n_samples=[n_samples // 10, n_samples * 4 // 10,
n_samples // 10, n_samples * 4 // 10],
cluster_std=0.5, centers=centers_0,
random_state=random_state)[0],
make_blobs(n_samples=[n_samples // 5, n_samples * 4 // 5],
cluster_std=0.5, centers=centers_1,
random_state=random_state)[0],
]
figure = plt.figure(figsize=(14, 9))
i = 1
for ds_cnt, X in enumerate(X_list):
ax = plt.subplot(len(X_list), len(strategies) + 1, i)
ax.scatter(X[:, 0], X[:, 1], edgecolors='k')
if ds_cnt == 0:
ax.set_title("Input data", size=14)
xx, yy = np.meshgrid(
np.linspace(X[:, 0].min(), X[:, 0].max(), 300),
np.linspace(X[:, 1].min(), X[:, 1].max(), 300))
grid = np.c_[xx.ravel(), yy.ravel()]
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# transform the dataset with KBinsDiscretizer
for strategy in strategies:
enc = KBinsDiscretizer(n_bins=4, encode='ordinal', strategy=strategy)
enc.fit(X)
grid_encoded = enc.transform(grid)
ax = plt.subplot(len(X_list), len(strategies) + 1, i)
# horizontal stripes
horizontal = grid_encoded[:, 0].reshape(xx.shape)
ax.contourf(xx, yy, horizontal, alpha=.5)
# vertical stripes
vertical = grid_encoded[:, 1].reshape(xx.shape)
ax.contourf(xx, yy, vertical, alpha=.5)
ax.scatter(X[:, 0], X[:, 1], edgecolors='k')
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
if ds_cnt == 0:
ax.set_title("strategy='%s'" % (strategy, ), size=14)
i += 1
plt.tight_layout()
plt.show()
| [
"tdiethe@amazon.com"
] | tdiethe@amazon.com |
980895b0e1bce4169f2bdcb8aa270a1ae9dd834c | 24b2f3f5f49ed19cf7fd3dcd433d6b72806e08cf | /python/array/0054_Spiral_Matrix.py | 72264e152eccf0198675fc6229fbbc7746b9e527 | [] | no_license | lizzzcai/leetcode | 97089e4ca8c3c53b5a4a50de899591be415bac37 | 551cd3b4616c16a6562eb7c577ce671b419f0616 | refs/heads/master | 2021-06-23T05:59:56.928042 | 2020-12-07T03:07:58 | 2020-12-07T03:07:58 | 162,840,861 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,282 | py | """
18/09/2019
54. Spiral Matrix - Medium
Tag: Array
Given a matrix of m x n elements (m rows, n columns), return all elements of the matrix in spiral order.
Example 1:
Input:
[
[ 1, 2, 3 ],
[ 4, 5, 6 ],
[ 7, 8, 9 ]
]
Output: [1,2,3,6,9,8,7,4,5]
Example 2:
Input:
[
[1, 2, 3, 4],
[5, 6, 7, 8],
[9,10,11,12]
]
Output: [1,2,3,4,8,12,11,10,9,5,6,7]
"""
from typing import List
class Solution:
    def spiralOrder(self, matrix: List[List[int]]) -> List[int]:
        """Return all elements of *matrix* in clockwise spiral order.

        Fixes over the previous version: leftover debug ``print`` calls
        removed (they polluted stdout on every call) and the O(m*n)
        ``record`` visited-matrix replaced by four shrinking boundaries.

        Time:  O(m*n) - every cell is visited exactly once.
        Space: O(1) extra (output list excluded).
        """
        if not matrix or not matrix[0]:
            return []

        top, bottom = 0, len(matrix) - 1
        left, right = 0, len(matrix[0]) - 1
        result = []

        while top <= bottom and left <= right:
            # Top row, left -> right.
            for col in range(left, right + 1):
                result.append(matrix[top][col])
            top += 1
            # Right column, top -> bottom.
            for row in range(top, bottom + 1):
                result.append(matrix[row][right])
            right -= 1
            # Bottom row, right -> left (guard against re-reading a
            # single remaining row).
            if top <= bottom:
                for col in range(right, left - 1, -1):
                    result.append(matrix[bottom][col])
                bottom -= 1
            # Left column, bottom -> top (guard against a single column).
            if left <= right:
                for row in range(bottom, top - 1, -1):
                    result.append(matrix[row][left])
                left += 1

        return result
# Unit Test
import unittest


class spiralOrderCase(unittest.TestCase):
    """Regression tests for Solution.spiralOrder: square, rectangular
    and empty inputs."""

    def setUp(self):
        # No per-test fixtures required.
        pass

    def tearDown(self):
        pass

    def test_spiralOrder(self):
        func = Solution().spiralOrder
        # 3x3 square matrix.
        self.assertEqual(func([
            [ 1, 2, 3 ],
            [ 4, 5, 6 ],
            [ 7, 8, 9 ]
        ]), [1,2,3,6,9,8,7,4,5])
        # 3x4 rectangular matrix.
        self.assertEqual(func([
            [1, 2, 3, 4],
            [5, 6, 7, 8],
            [9,10,11,12]
        ]), [1,2,3,4,8,12,11,10,9,5,6,7])
        # Empty input yields an empty traversal.
        self.assertEqual(func([
        ]), [])


if __name__ == '__main__':
    unittest.main()
| [
"lilcolinn@gmail.com"
] | lilcolinn@gmail.com |
e291db95fbe82bddb3fcaf8c6ce1776e9741bdfa | 6bbcf512837bdcdfec3091b3337d54a8c455c7b9 | /practice/Operators.py | af03cb45e6fa25e1e62edaac81ae702144e24ec8 | [] | no_license | sachinlokesh05/Python-Core-Programs | a342ebdc7be070b66254e505df044fdaf03f147f | 8eec5595b51203d559e1d6f0e40646e63ad3645a | refs/heads/master | 2022-04-28T02:32:07.101993 | 2020-04-09T12:03:01 | 2020-04-09T12:03:01 | 249,491,090 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 751 | py | class Operators:
class Operators:
    """Evaluate one of four fixed arithmetic expressions over three operands.

    Operands are supplied at construction time; :meth:`operation` reads a
    menu choice from stdin and returns the evaluated expression, asking
    again on an invalid choice.
    """

    # Legacy class-level placeholders kept for backwards compatibility;
    # instances store their operands in the private __aa/__bb/__cc fields.
    a = None
    b = None
    c = None

    def __init__(self, a, b, c):
        self.__aa = a
        self.__bb = b
        self.__cc = c

    def _compute(self, choice):
        """Return the value of expression *choice* (1-4), or None if invalid.

        1: a + b * c    2: c + a / b    3: a % b + c    4: a * b + c
        """
        a, b, c = self.__aa, self.__bb, self.__cc
        if choice == 1:
            return a + b * c
        if choice == 2:
            # BUG FIX: the original divided by c ("c + a / c") although
            # the advertised expression is "c + a / b".
            return c + a / b
        if choice == 3:
            return a % b + c
        if choice == 4:
            return a * b + c
        return None

    def operation(self):
        """Prompt until a valid choice (1-4) is entered; return its value."""
        choice = int(input("enter choice of your: "))
        result = self._compute(choice)
        if result is None:
            print("Wrong choice")
            return self.operation()
        return result
print('''
1. a + b * c
3. c + a / b
2. a % b + c
4. a * b + c
''')
print(ap.operation())
| [
"sachin.beee.15@acharya.ac.in"
] | sachin.beee.15@acharya.ac.in |
55096585b424ea3637fe9d43bddf009256acc018 | 8890925319a25dc3df29f53d0d8125d347680f68 | /looker_client_31/looker_sdk/oidc_user_attribute_write.py | 919915817144e74ca57abbc588f370b45226b734 | [
"MIT"
] | permissive | ContrastingSounds/looker_sdk_31 | f5d300ae54aee1cc5a2621b36b49541db24ed248 | f973434049fff1b605b10086ab8b84f2f62e3489 | refs/heads/master | 2020-03-19T20:31:24.785373 | 2018-06-11T09:41:36 | 2018-06-11T09:41:36 | 136,802,021 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,870 | py | # coding: utf-8
"""
Experimental Looker API 3.1 Preview
This API 3.1 is in active development. Breaking changes are likely to occur to some API functions in future Looker releases until API 3.1 is officially launched and upgraded to beta status. If you have time and interest to experiment with new or modified services exposed in this embryonic API 3.1, we welcome your participation and feedback! For large development efforts or critical line-of-business projects, we strongly recommend you stick with the API 3.0 while API 3.1 is under construction. # noqa: E501
OpenAPI spec version: 3.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class OIDCUserAttributeWrite(object):
    """API model for an OIDC user-attribute mapping.

    NOTE: originally generated by swagger-codegen; the public surface
    (``swagger_types``, ``attribute_map``, the five attribute properties
    and the dict/str helpers) is preserved exactly.

    Attributes:
        swagger_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the API payload.
    """

    swagger_types = {
        'name': 'str',
        'required': 'bool',
        'user_attribute_ids': 'list[int]',
        'url': 'str',
        'can': 'dict(str, bool)'
    }

    attribute_map = {
        'name': 'name',
        'required': 'required',
        'user_attribute_ids': 'user_attribute_ids',
        'url': 'url',
        'can': 'can'
    }

    def __init__(self, name=None, required=None, user_attribute_ids=None, url=None, can=None):  # noqa: E501
        """Create the model; every field is optional and defaults to None."""
        # Backing fields for the properties below.
        self._name = name
        self._required = required
        self._user_attribute_ids = user_attribute_ids
        self._url = url
        self._can = can
        self.discriminator = None

    @property
    def name(self):
        """str: Name of the User Attribute in OIDC."""
        return self._name

    @name.setter
    def name(self, name):
        self._name = name

    @property
    def required(self):
        """bool: attribute must be present in the OIDC assertion for login to succeed."""
        return self._required

    @required.setter
    def required(self, required):
        self._required = required

    @property
    def user_attribute_ids(self):
        """list[int]: Looker user attribute ids."""
        return self._user_attribute_ids

    @user_attribute_ids.setter
    def user_attribute_ids(self, user_attribute_ids):
        self._user_attribute_ids = user_attribute_ids

    @property
    def url(self):
        """str: link to the OIDC config."""
        return self._url

    @url.setter
    def url(self, url):
        self._url = url

    @property
    def can(self):
        """dict(str, bool): operations the current user may perform on this object."""
        return self._can

    @can.setter
    def can(self, can):
        self._can = can

    def to_dict(self):
        """Return the model properties as a plain dict, recursing into
        nested models (anything exposing ``to_dict``)."""
        def convert(value):
            if isinstance(value, list):
                return [item.to_dict() if hasattr(item, "to_dict") else item
                        for item in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {key: val.to_dict() if hasattr(val, "to_dict") else val
                        for key, val in value.items()}
            return value

        return {attr: convert(getattr(self, attr)) for attr in self.swagger_types}

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Models are equal when they are the same type with equal fields."""
        if not isinstance(other, OIDCUserAttributeWrite):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
"looker@MacBook-Pro.local"
] | looker@MacBook-Pro.local |
939acab1fd12f3f792ee8ed5327b4a92cbb34516 | 49c2e3ebf7f5d2f79af6e26c44b4d07ec14a20d5 | /Hello World/venv/Lib/site-packages/pip/_vendor/progress/__init__.py | ffcb82afcfdd11034008e84488dfde21c6399cc7 | [] | no_license | TaylorHoll/Python_Projects | a0d86642463bdc5b3ea67dae0146c115185c1db2 | a8285b058ed0b4e0a366753d61526056dab23cd3 | refs/heads/master | 2020-06-13T09:04:29.666639 | 2020-01-07T03:40:25 | 2020-01-07T03:40:25 | 194,608,692 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,859 | py | # Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import division, print_function
from datetime import timedelta
from math import ceil
from sys import stderr
from collections import deque
try:
from time import monotonic
except ImportError:
from time import time as monotonic
__version__ = '1.5'
HIDE_CURSOR = '\x1b[?25l'
SHOW_CURSOR = '\x1b[?25h'
class Infinite(object):
    """Progress indicator for tasks of unknown length.

    Writes to ``file`` (stderr by default) only when it looks like a tty,
    keeps a simple moving average of the per-item duration in ``avg`` and
    can be used as a context manager or via :meth:`iter`.
    """

    file = stderr
    sma_window = 10  # Simple Moving Average window
    check_tty = True
    hide_cursor = True

    def __init__(self, message='', **kwargs):
        self.index = 0
        self.start_ts = monotonic()
        self.avg = 0
        self._avg_update_ts = self.start_ts
        self._ts = self.start_ts
        self._xput = deque(maxlen=self.sma_window)
        # Arbitrary keyword arguments become attributes (e.g. max=, file=).
        for key, val in kwargs.items():
            setattr(self, key, val)

        self._width = 0
        self.message = message

        if self.file and self.is_tty():
            if self.hide_cursor:
                print(HIDE_CURSOR, end='', file=self.file)
            print(self.message, end='', file=self.file)
            self.file.flush()

    def __getitem__(self, key):
        # Enables '%(index)d'-style formatting against the instance;
        # private attributes are hidden.
        if key.startswith('_'):
            return None
        return getattr(self, key, None)

    @property
    def elapsed(self):
        # Whole seconds since construction.
        return int(monotonic() - self.start_ts)

    @property
    def elapsed_td(self):
        return timedelta(seconds=self.elapsed)

    def update_avg(self, n, dt):
        # Fold *dt* seconds spent on *n* items into the moving average.
        if n > 0:
            xput_len = len(self._xput)
            self._xput.append(dt / n)
            now = monotonic()
            # update when we're still filling _xput, then after every second
            if (xput_len < self.sma_window or
                    now - self._avg_update_ts > 1):
                self.avg = sum(self._xput) / len(self._xput)
                self._avg_update_ts = now

    def update(self):
        # Hook: subclasses redraw their representation here.
        pass

    def start(self):
        pass

    def clearln(self):
        # Carriage-return + clear-to-end-of-line escape.
        if self.file and self.is_tty():
            print('\r\x1b[K', end='', file=self.file)

    def write(self, s):
        # Redraw the line in place, padding to the widest line seen so far.
        if self.file and self.is_tty():
            line = self.message + s.ljust(self._width)
            print('\r' + line, end='', file=self.file)
            self._width = max(self._width, len(s))
            self.file.flush()

    def writeln(self, line):
        if self.file and self.is_tty():
            self.clearln()
            print(line, end='', file=self.file)
            self.file.flush()

    def finish(self):
        # Terminate the progress line and restore the cursor.
        if self.file and self.is_tty():
            print(file=self.file)
            if self.hide_cursor:
                print(SHOW_CURSOR, end='', file=self.file)

    def is_tty(self):
        return self.file.isatty() if self.check_tty else True

    def next(self, n=1):
        # Advance by *n* items and refresh the display.
        now = monotonic()
        dt = now - self._ts
        self.update_avg(n, dt)
        self._ts = now
        self.index = self.index + n
        self.update()

    def iter(self, it):
        # Wrap an iterable, advancing the indicator once per item.
        with self:
            for x in it:
                yield x
                self.next()

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.finish()
self.finish()
class Progress(Infinite):
    """Progress indicator for tasks with a known total (``max``)."""

    def __init__(self, *args, **kwargs):
        super(Progress, self).__init__(*args, **kwargs)
        self.max = kwargs.get('max', 100)

    @property
    def eta(self):
        """Estimated seconds remaining, rounded up."""
        return int(ceil(self.avg * self.remaining))

    @property
    def eta_td(self):
        """ETA as a :class:`datetime.timedelta`."""
        return timedelta(seconds=self.eta)

    @property
    def percent(self):
        """Completion as a percentage (0-100)."""
        return self.progress * 100

    @property
    def progress(self):
        """Completion as a fraction, clamped to at most 1."""
        return min(1, self.index / self.max)

    @property
    def remaining(self):
        """Items left to process, never negative."""
        return max(self.max - self.index, 0)

    def start(self):
        self.update()

    def goto(self, index):
        """Jump straight to absolute position *index*."""
        self.next(index - self.index)

    def iter(self, it):
        """Iterate *it*, advancing once per item; ``max`` is taken from
        ``len(it)`` when available."""
        try:
            self.max = len(it)
        except TypeError:
            pass

        with self:
            for item in it:
                yield item
                self.next()
| [
"taylorholloway1984@gmail.com"
] | taylorholloway1984@gmail.com |
af4646c0c8d791591845ff4194705e2db2ffaf58 | 08dfaf714830a6310742dcd50848790d595e838e | /中级班/chapter03/code_06_rotateMatrix.py | 77d7da7638bb7ab3952d12a1d9398029625a3035 | [] | no_license | Tokyo113/leetcode_python | d9e0fb96a76efaadcec7aad08f5ef542d898d434 | e86b3fb26aef1cf63727e3e5c9fd4ddc9bedb7f1 | refs/heads/master | 2020-08-10T15:36:10.364714 | 2020-04-13T08:28:53 | 2020-04-13T08:28:53 | 214,369,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,144 | py | #coding:utf-8
'''
@Time: 2020/2/13 16:58
@author: Tokyo
@file: code_06_rotateMatrix.py
@desc:
给定一个正方形矩阵,只用有限几个变量,实现矩阵中每个位置的数顺时针转动
90度,比如如下的矩阵
0 1 2 3
4 5 6 7
8 9 10 11
12 13 14 15
矩阵应该被调整为:
12 8 4 0
13 9 5 1
14 10 6 2
15 11 7 3
'''
def rotateMatrix(arr):
    """Rotate the square matrix *arr* 90 degrees clockwise, in place.

    Works ring by ring from the outermost frame inwards; O(1) extra space.
    """
    top, left = 0, 0
    bottom, right = len(arr) - 1, len(arr[0]) - 1
    while top < bottom:
        rotateEdge(arr, top, left, bottom, right)
        top, left = top + 1, left + 1
        bottom, right = bottom - 1, right - 1


def rotateEdge(arr, lr, lc, rr, rc):
    """Rotate one rectangular ring (corners (lr,lc) / (rr,rc)) clockwise."""
    for i in range(rc - lc):
        # Simultaneous 4-way swap: top <- left <- bottom <- right <- top.
        (arr[lr][lc + i],
         arr[rr - i][lc],
         arr[rr][rc - i],
         arr[lr + i][rc]) = (arr[rr - i][lc],
                             arr[rr][rc - i],
                             arr[lr + i][rc],
                             arr[lr][lc + i])
def printMatrix(arr):
    """Print the matrix row by row, each value followed by a tab.

    Mirrors the original layout exactly: every cell gets a trailing tab
    and each row ends with a newline; row width is taken from the first
    row, matching the original ``range(len(arr[0]))`` behavior.
    """
    for row in arr:
        line = ""
        for j in range(len(arr[0])):
            line += str(row[j]) + "\t"
        print(line)
print("")
if __name__ == '__main__':
    # Demo: print a 4x4 matrix, rotate it 90 degrees clockwise in place,
    # then print the result.
    a = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13,14,15,16]]
    printMatrix(a)
    print("=================")
    rotateMatrix(a)
    printMatrix(a)
| [
"21810179@zju.edu.cn"
] | 21810179@zju.edu.cn |
c0005faed0405a4c138d03792873eedbc657de80 | 062e43d41c6daa6943bfad8f4510d19e43840f96 | /src/yaml/xml.py | b2e06652a02cedabe4dfa5a2964201b3e9a8934b | [] | no_license | allefant/land | b61d60106d8224e01a34d7504e1310b5fb0bd373 | f78f0da9c57884be8819f022c6e6442f90d4434c | refs/heads/main | 2023-06-22T06:15:09.938629 | 2023-06-20T20:08:13 | 2023-06-20T20:08:13 | 47,664,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,850 | py | import land.land
import land.yaml
import global ctype
# Parser state: where in the XML byte stream we currently are.
static enum XmlState:
    Outside
    ElementName
    Attributes
    AttributeName
    AttributeStart
    AttributeValue

# Shared state for one parse run.
static class XmlParser:
    XmlState state
    # True while inside a closing tag (</x>, or the / of <x/>).
    bool closing
    # Characters accumulated since the last emitted token.
    LandBuffer *value
    # The yaml document being built.
    LandYaml *yaml

# Flush the accumulated characters as a yaml scalar (NUL-terminated copy).
static def scalar(XmlParser *x):
    land_buffer_add_char(x.value, 0)
    land_yaml_add_scalar(x.yaml, land_strdup(x.value.buffer))
    land_buffer_clear(x.value)

# Flush only if any characters were accumulated.
static def opt_scalar(XmlParser *x):
    if x.value.n:
        scalar(x)

# Drop the accumulated characters without emitting anything.
static def discard_scalar(XmlParser *x):
    land_buffer_clear(x.value)
# <a x="2">b<c>d</c>e<f y="3"/></a>
#
# [{"<":"a", "x":"2", ">":["b", {"<":"c", ">":["d"]}, "e", {"<":f", "y":"3"}]}]
#
#
# Parse an XML file into a LandYaml document (see the mapping example in
# the comment above: a tag becomes {"<": name, attr: value, ">": [children]}).
# Returns None when the file cannot be opened. Implemented as a single-pass
# character state machine; characters not consumed by a state transition
# fall through to add_char() and accumulate in x.value.
def land_yaml_load_xml(str filename) -> LandYaml *:
    LandFile *f = land_file_new(filename, "rb")
    if not f:
        land_log_message("Failed opening %s\n", filename)
        return None
    land_log_message("Parsing yaml %s\n", filename)

    XmlParser x_
    XmlParser *x = &x_
    x.yaml = land_yaml_new(filename)
    x.value = land_buffer_new()
    x.state = Outside
    x.closing = False
    land_yaml_add_sequence(x.yaml) # root list of elements
    while True:
        int c = land_file_getc(f)
        if c < 0:
            break
        if x.state == Outside:
            # Text content between tags; '<' flushes it and starts a tag.
            if c == '<':
                opt_scalar(x)
                x.state = ElementName
                continue
        elif x.state == ElementName:
            if c == '/':
                x.closing = True
                continue
            elif c == '>':
                if x.closing:
                    # </name>: the buffered name is not needed.
                    discard_scalar(x)
                    close_tag(x)
                    land_yaml_done(x.yaml) # content
                else:
                    create_tag(x)
                    open_tag(x) # no attributes
                continue
            elif isspace(c):
                # Name ended; attributes may follow.
                create_tag(x)
                x.state = Attributes
                continue
        elif x.state == Attributes:
            if isspace(c):
                continue
            elif c == '/':
                # Self-closing tag, e.g. <x/>.
                x.closing = True
                continue
            elif c == '?': # to deal with the XML header
                x.closing = True
                continue
            elif c == '>':
                if x.closing:
                    close_tag(x)
                else:
                    open_tag(x)
                continue
            elif c == '=':
                # Buffered characters form the attribute name.
                scalar(x)
                x.state = AttributeStart
                continue
        elif x.state == AttributeStart:
            # Expect the opening quote of the attribute value.
            if c == '"':
                x.state = AttributeValue
                continue
        elif x.state == AttributeValue:
            if c == '"':
                x.state = Attributes
                scalar(x)
                continue
        add_char(x, c)
    land_yaml_done(x.yaml) # root list of elements
    land_file_destroy(f)
    land_buffer_destroy(x.value)
    return x.yaml
# Accumulate one character into the pending token buffer.
static def add_char(XmlParser *x, char c):
    land_buffer_add_char(x.value, c)

# Start a tag mapping and store the buffered tag name under the "<" key.
static def create_tag(XmlParser *x):
    land_yaml_add_mapping(x.yaml) # tag mapping
    land_yaml_add_scalar(x.yaml, land_strdup("<"))
    scalar(x)

# Begin the tag's content: a sequence stored under the ">" key.
static def open_tag(XmlParser *x):
    x.state = Outside
    land_yaml_add_scalar(x.yaml, land_strdup(">"))
    land_yaml_add_sequence(x.yaml) # content

# Finish the current tag mapping and reset the closing flag.
static def close_tag(XmlParser *x):
    land_yaml_done(x.yaml) # tag mapping
    x.state = Outside
    x.closing = False
# saving XML

# Write *s* to the output, optionally breaking the line first when it
# would exceed 80 columns; tracks the current line length in p.line_length.
static def xml_write(YamlParser *p, char const *s, bool can_break_before):
    int n = strlen(s)
    if can_break_before and p.line_length + n > 80:
        land_file_write(p.file, "\n", 1)
        p.line_length = 0
    land_file_write(p.file, s, n)
    int i = land_find(s, "\n")
    if i >= 0:
        # s itself contained a newline: count only what follows it.
        p.line_length = n - 1 - i
    else:
        p.line_length += n

# Serialize one tag mapping: name from "<", attributes from the remaining
# keys, children from ">". Emits <x .../> when there is no content.
static def xml_save_mapping(LandYamlEntry *e, YamlParser *p) -> bool:
    str name = land_yaml_get_entry_scalar(e, "<")
    if not name: return False
    xml_write(p, "<", False)
    xml_write(p, name, False)
    for char const *key in LandArray *e.sequence:
        # "<" (name) and ">" (content) are not attributes.
        if land_equals(key, "<") or land_equals(key, ">"): continue
        xml_write(p, " ", False)
        xml_write(p, key, True)
        xml_write(p, "=\"", False)
        str value = land_yaml_get_entry_scalar(e, key)
        xml_write(p, value, False)
        xml_write(p, "\"", False)
    LandYamlEntry *contents = land_yaml_get_entry(e, ">")
    if contents:
        xml_write(p, ">", True)
        xml_save_sequence(contents, p)
        xml_write(p, "</", False)
        xml_write(p, name, False)
        xml_write(p, ">", True)
    else:
        xml_write(p, " />", True)
    return True

# Serialize every child entry in order.
static def xml_save_sequence(LandYamlEntry *e, YamlParser *p) -> bool:
    for LandYamlEntry *e2 in LandArray *e.sequence:
        xml_save_entry(e2, p)
    return True

# A scalar is plain text content.
static def xml_save_scalar(LandYamlEntry *e, YamlParser *p) -> bool:
    xml_write(p, e.scalar, False)
    return True

# Dispatch on the entry type.
static def xml_save_entry(LandYamlEntry *e, YamlParser *p) -> bool:
    if e.type == YamlMapping:
        return xml_save_mapping(e, p)
    elif e.type == YamlSequence:
        return xml_save_sequence(e, p)
    elif e.type == YamlScalar:
        return xml_save_scalar(e, p)
    return false

# Write the whole yaml document to yaml.filename as XML.
def land_yaml_save_xml(LandYaml *yaml):
    LandFile *f = land_file_new(yaml.filename, "wb")
    if not f:
        goto error
    YamlParser p
    memset(&p, 0, sizeof p)
    p.file = f
    if not xml_save_entry(yaml.root, &p): goto error
    label error
    if f: land_file_destroy(f)
# Ensure the document is positioned inside a content sequence before a
# tag or text is added: create the root sequence, or open a ">" content
# sequence when the current parent is a tag mapping.
def _xml(LandYaml *yaml):
    if not yaml.root or not yaml.parent:
        land_yaml_add_sequence(yaml)
    elif yaml.parent->type == YamlMapping:
        land_yaml_add_scalar(yaml, ">")
        land_yaml_add_sequence(yaml)

# Open a new tag: a mapping whose "<" key holds the tag name.
def land_yaml_xml_tag(LandYaml *yaml, str name):
    _xml(yaml)
    land_yaml_add_mapping(yaml)
    land_yaml_add_scalar(yaml, "<")
    land_yaml_add_scalar(yaml, name)

# Convenience: <name>content</name> in one call.
def land_yaml_xml_tag_with_content(LandYaml *yaml, str name, str content):
    land_yaml_xml_tag(yaml, name)
    land_yaml_xml_content(yaml, content)
    land_yaml_xml_end(yaml)

# Add text content to the currently open tag.
def land_yaml_xml_content(LandYaml *yaml, str content):
    _xml(yaml)
    land_yaml_add_scalar(yaml, content)

# Add a key="value" attribute to the currently open tag (before any
# content is written).
def land_yaml_xml_attribute(LandYaml *yaml, str key, value):
    land_yaml_add_scalar(yaml, key)
    land_yaml_add_scalar(yaml, value)

def land_yaml_xml_end(LandYaml *yaml):
    land_yaml_done(yaml)
    # If we close a tag, we close the mapping, so additional children
    # can be added. When we close the parent, we just closed the
    # sequence, but we also need to close the mapping. Basically we
    # always need to be in a sequence after this function returns.
    if yaml.parent and yaml.parent->type == YamlMapping:
        land_yaml_done(yaml)
"elias@users.sourceforge.net"
] | elias@users.sourceforge.net |
f0d1317a953a4569a174b0fc00a48f7a62f38d1b | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-cbs/huaweicloudsdkcbs/v1/model/collect_key_words_request.py | f6637441f094ae9a0789038a9ec0137977bbd716 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 5,869 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class CollectKeyWordsRequest:

    """
    Attributes:
        openapi_types (dict): The key is attribute name
                            and the value is attribute type.
        attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """

    sensitive_list = []

    openapi_types = {
        'qabot_id': 'str',
        'start_time': 'str',
        'end_time': 'str',
        'top': 'int'
    }

    attribute_map = {
        'qabot_id': 'qabot_id',
        'start_time': 'start_time',
        'end_time': 'end_time',
        'top': 'top'
    }

    def __init__(self, qabot_id=None, start_time=None, end_time=None, top=None):
        """CollectKeyWordsRequest

        Request model for collecting keyword statistics of a qabot.

        :param qabot_id: qabot id, in UUID format.
        :type qabot_id: str
        :param start_time: query window start (UTC, long; defaults to 0).
        :type start_time: str
        :param end_time: query window end (UTC, long; defaults to the current time in milliseconds).
        :type end_time: str
        :param top: maximum number of keywords to return (0-50, default 10).
        :type top: int
        """

        self._qabot_id = None
        self._start_time = None
        self._end_time = None
        self._top = None
        self.discriminator = None

        # qabot_id is mandatory; the optional fields are assigned only when given.
        self.qabot_id = qabot_id
        if start_time is not None:
            self.start_time = start_time
        if end_time is not None:
            self.end_time = end_time
        if top is not None:
            self.top = top

    @property
    def qabot_id(self):
        """qabot id of this CollectKeyWordsRequest, in UUID format.

        :rtype: str
        """
        return self._qabot_id

    @qabot_id.setter
    def qabot_id(self, qabot_id):
        """Set the qabot id (UUID) of this CollectKeyWordsRequest."""
        self._qabot_id = qabot_id

    @property
    def start_time(self):
        """Query window start (UTC, long; defaults to 0).

        :rtype: str
        """
        return self._start_time

    @start_time.setter
    def start_time(self, start_time):
        """Set the query window start of this CollectKeyWordsRequest."""
        self._start_time = start_time

    @property
    def end_time(self):
        """Query window end (UTC, long; defaults to now in milliseconds).

        :rtype: str
        """
        return self._end_time

    @end_time.setter
    def end_time(self, end_time):
        """Set the query window end of this CollectKeyWordsRequest."""
        self._end_time = end_time

    @property
    def top(self):
        """Maximum number of keywords returned (0-50, default 10).

        :rtype: int
        """
        return self._top

    @top.setter
    def top(self, top):
        """Set the maximum number of keywords returned."""
        self._top = top

    def to_dict(self):
        """Returns the model properties as a dict"""
        def _serialize(value):
            # Nested SDK models know how to serialize themselves.
            return value.to_dict() if hasattr(value, "to_dict") else value

        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [_serialize(item) for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: _serialize(item) for key, item in value.items()}
            else:
                # Scalars on the sensitive list are masked in the output.
                result[attr] = "****" if attr in self.sensitive_list else value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")

        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Two requests are equal when they share type and all attributes."""
        return isinstance(other, CollectKeyWordsRequest) and \
            self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
648b0989580d47d21becb42dd15835780632c9e9 | 7af9841dfdeb7192cee9f5bc5ae24ebabeeebdcc | /article/admin.py | a36223f706dc91c9321ddeaf13c4ce75ccbd793c | [] | no_license | dimansion/bepy | 513d1d6b8c6f679ce97f46741b50b73dabf20484 | dd92999b9fb0d65e9479372718409785a8d26d26 | refs/heads/master | 2020-06-28T11:27:02.204255 | 2016-11-14T11:26:32 | 2016-11-14T11:26:32 | 67,694,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | from django.contrib import admin
# Register your models here.
from .models import Article
class ArticleModelAdmin(admin.ModelAdmin):
    """Admin changelist configuration for Article."""

    # Columns shown on the changelist page.
    list_display = ["title", "updated", "timestamp"]
    # "updated" carries the link to the detail page, which frees "title"
    # to be edited inline (a field cannot be both link and editable).
    list_display_links = ["updated"]
    list_editable = ["title"]
    list_filter = ["updated", "timestamp"]
    search_fields = ["title", "content"]
    # NOTE: the original declared an inner `class Meta` here; ModelAdmin
    # does not read a Meta class (that is a Model/ModelForm concept), so
    # it was dead, misleading code and has been removed.


admin.site.register(Article, ArticleModelAdmin)
"dimansional@gmail.com"
] | dimansional@gmail.com |
bc07ca5cbd963e7bdc3369aae466a3c181a9c7bb | 50de54517ef5e157b43598e412c477fd66890a3e | /Assignment 04/Problem 12.py | 324d03a9fc0e2eb71aefe5e681cb03cda366f9f8 | [] | no_license | Shihabsarker93/BRACU-CSE111 | f530be247bebaaee9cc5e85948dc070adae0c6ae | 17c95c76f84abffe9d9bdcb5861fbacbc510b5a6 | refs/heads/main | 2023-08-13T15:33:57.331850 | 2021-10-07T10:56:09 | 2021-10-07T10:56:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,226 | py | class ParcelKoro:
def __init__(self, name=None, product_weight=None) -> None:
self.name = name
self.product_weight = product_weight
if self.product_weight == None:
self.product_weight = 0
def calculateFee(self, location=None):
self.location = location
if location == None:
location_charge = 50
else:
location_charge = 100
if self.product_weight == 0:
self.total_fee = 0
else:
self.total_fee = (self.product_weight * 20) + location_charge
def printDetails(self):
if self.name == None:
self.name = "No name set"
print(f"Customer Name: {self.name}")
print(f"Product Weight: {self.product_weight}")
print(f"Total fee: {self.total_fee}")
# Demo driver: exercises ParcelKoro with the scenarios from the assignment.
print("**********************")
# No name, no weight: calculateFee leaves the fee at 0.
p1 = ParcelKoro()
p1.calculateFee()
p1.printDetails()
print("**********************")
# Named customer, still no weight: fee stays 0.
p2 = ParcelKoro("Bob The Builder")
p2.calculateFee()
p2.printDetails()
print("----------------------------")
# Same parcel after setting a weight: 15 * 20 + 50 (default location) = 350.
p2.product_weight = 15
p2.calculateFee()
p2.printDetails()
print("**********************")
# Weight plus an explicit location: 10 * 20 + 100 = 300.
p3 = ParcelKoro("Dora The Explorer", 10)
p3.calculateFee("Dhanmondi")
p3.printDetails()
| [
"mirzamahrabhossain@gmail.com"
] | mirzamahrabhossain@gmail.com |
bdb495a4aaf1752cb932eda410fe95cca71f3510 | b0d5e423f09181a322a0166b06bf7fe45a3befc0 | /MetioTube/profiles/forms.py | b17499317bd2ae783e5fe56a9baa71d145f4935f | [
"MIT"
] | permissive | Sheko1/MetioTube | f5da4184bb1590565ba34cef2fff02b379ab3e56 | c1c36d00ea46fc37cc7f3c0c9c0cae6e89b2113c | refs/heads/main | 2023-07-04T12:54:57.500778 | 2021-08-14T19:41:56 | 2021-08-14T19:41:56 | 383,907,948 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 375 | py | from django import forms
from MetioTube.profiles.models import Profile
class ProfileForm(forms.ModelForm):
    """Form for editing a Profile; the picture input only offers JPG/PNG files."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Hint the browser's file picker at the accepted image types.
        picture_field = self.fields['profile_picture']
        picture_field.widget.attrs['accept'] = 'image/jpg, image/png, image/jpeg'

    class Meta:
        model = Profile
        exclude = ('user', 'subscribers')
| [
"martinkypar@gmail.com"
] | martinkypar@gmail.com |
83ea1cd60b07b7661cf4d71ba9914ae9e4c12194 | deda76cdf57c2a178f7a6af0ef8abf0c239d0fbf | /post/migrations/0010_auto_20170225_1412.py | 3b015c6440e5213db5654f71dec65202d685fad1 | [] | no_license | mikekeda/Jess-blog | 55c03deaa3587f4280cb77f4c33a4728965f7503 | 0498450c671b7116e759ee608b60a56cf5c1722c | refs/heads/master | 2023-07-26T21:25:31.519523 | 2023-06-06T17:27:13 | 2023-06-06T17:27:13 | 69,493,913 | 2 | 0 | null | 2023-07-05T22:57:16 | 2016-09-28T18:53:57 | Python | UTF-8 | Python | false | false | 458 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-25 14:12
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter Post.slug: hidden from forms (editable=False) and unique."""

    dependencies = [
        ('post', '0009_auto_20170225_1410'),
    ]

    operations = [
        migrations.AlterField(
            model_name='post',
            name='slug',
            # editable=False keeps the slug out of forms/admin; unique=True
            # allows it to serve as a lookup key.
            field=models.SlugField(editable=False, unique=True),
        ),
    ]
| [
"mriynuk@gmail.com"
] | mriynuk@gmail.com |
2eab272b612bf0026bdfa21c63ff576d34fd8dde | 8780bc7f252f14ff5406ce965733c099034920b7 | /pyCode/novel_Mongodb/novel/settings.py | e49a3b889bb618337c2e82ee26bcf3662da38c06 | [] | no_license | 13661892653/workspace | 5e4e458d31b9355c67d67ba7d9faccbcc1ac9f6b | 17960becabb3b4f0fc30009c71a11c4f7a5f8330 | refs/heads/master | 2020-12-24T20:00:15.541432 | 2018-08-14T13:56:15 | 2018-08-14T13:56:15 | 86,225,975 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,342 | py | # -*- coding: utf-8 -*-
# Scrapy settings for novel project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
# Project identity used by Scrapy in logs and module discovery.
BOT_NAME = 'novel'

SPIDER_MODULES = ['novel.spiders']
NEWSPIDER_MODULE = 'novel.spiders'


# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'novel (+http://www.yourdomain.com)'
# Browser-like agent string (desktop Firefox 23) — presumably to avoid
# being served bot-blocked pages; verify the target site still requires it.
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'

# Obey robots.txt rules
ROBOTSTXT_OBEY = True

# Route every scraped item through the MongoDB pipeline (priority 300).
ITEM_PIPELINES = {
   'novel.pipelines.MongoDBPipeline': 300,
}

# Connection settings — presumably consumed by novel.pipelines.MongoDBPipeline.
MONGODB_HOST = "127.0.0.1"
MONGODB_PORT = 27017
MONGODB_DB = 'Jikexueyuan'   # target database
MONGODB_COLL = 'novel'       # target collection
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'novel.middlewares.NovelSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'novel.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'novel.pipelines.MongoDBPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"470563152@qq.com"
] | 470563152@qq.com |
a8eb9ab1b268fd5c994efa2afb0f3d85321e1cf9 | 06d86ca0465405a7d1a64fc6dbf4980f76565e54 | /torchnlp/nn/__init__.py | be635ea9026ee2911bec1756db558956b484099d | [
"BSD-3-Clause"
] | permissive | PetrochukM/PyTorch-NLP | 22b7f2628d6545270bc36964ce4551609f84ca9f | 53d7edcb8e0c099efce7c2ddf8cd7c44157fcac3 | refs/heads/master | 2023-08-05T20:15:06.954467 | 2023-07-04T21:11:26 | 2023-07-04T21:11:26 | 122,806,629 | 2,304 | 290 | BSD-3-Clause | 2022-07-16T23:44:23 | 2018-02-25T05:00:36 | Python | UTF-8 | Python | false | false | 499 | py | from torchnlp.nn.attention import Attention
from torchnlp.nn.lock_dropout import LockedDropout
from torchnlp.nn.weight_drop import WeightDropGRU
from torchnlp.nn.weight_drop import WeightDropLSTM
from torchnlp.nn.weight_drop import WeightDropLinear
from torchnlp.nn.weight_drop import WeightDrop
from torchnlp.nn.cnn_encoder import CNNEncoder
# Public API of ``torchnlp.nn`` — the names exported by
# ``from torchnlp.nn import *`` and listed by documentation tools.
__all__ = [
    'LockedDropout',
    'Attention',
    'CNNEncoder',
    'WeightDrop',
    'WeightDropGRU',
    'WeightDropLSTM',
    'WeightDropLinear',
]
| [
"petrochukm@gmail.com"
] | petrochukm@gmail.com |
412a3e50c46c08906300fcef62d66e697c2954e4 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_2_neat/16_0_2_Newyork167_RevengeOfThePancakes.py | b033b830db3257268cd1574abffce775324afeeb | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 503 | py | def start():
output_file = open('output.txt', 'w+')
t = int(raw_input().strip())
for x in range(t):
flip_count = 0
p = raw_input().strip()
for y in xrange(len(p) - 1, -1, -1):
if p[y] == "-":
flip_count += 1
p = flip(p[:y + 1]) + p[y + 1:]
output_file.write("Case #{case}: {result}\n".format(case=x+1, result=flip_count))
def flip(p):
    """Return *p* with every pancake inverted: '-' becomes '+', anything else '-'.

    The original wrapped the comprehension in a second, redundant ''.join
    over single-character strings; one generator pass is enough.
    """
    return ''.join("+" if x == "-" else "-" for x in p)
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
d653b9469d563ede22ef0db716328cbd291036b3 | 6466eef5477db250879a74935b3b776dc878ff3b | /iprofile/migrations/0002_auto_20210302_0739.py | d5634f9ccf7c6c6bfbe7220e774c7d58326a69ca | [] | no_license | BakdauletBolatE/django_ideas | 8edb61a569f436865283e82edba3377a150665a8 | ef0258f3aae0c090d38a5098d175bceaddcf67af | refs/heads/master | 2023-03-12T00:02:04.969353 | 2021-03-02T19:41:00 | 2021-03-02T19:41:00 | 324,287,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | # Generated by Django 3.1.4 on 2021-03-02 07:39
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Make Profile.specialization optional (nullable, blankable FK)."""

    dependencies = [
        ('iprofile', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='profile',
            name='specialization',
            # blank/null let a profile exist without a specialization;
            # on_delete=CASCADE removes the profile when its specialization
            # is deleted.
            field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='iprofile.specialization'),
        ),
    ]
| [
"bakosh21345@gmail.com"
] | bakosh21345@gmail.com |
1e2c08e559a9397aff2ade4a07738486067ea297 | 77428d258556f1cae13c7435bcb5ee387d2f7ed9 | /src/program/python/snippet/ProcExit.py | 23d60713b22cf13747dd1e925f813b5c6bffea4a | [] | no_license | imxood/imxood.github.io | d598d3d991f7e7d39787ecb2415ffe48489d9fd6 | a6fe8fe069b8af9d65b6afaabecfcfe99ed1ed21 | refs/heads/main | 2022-10-23T12:52:11.966389 | 2022-10-04T06:04:59 | 2022-10-04T06:04:59 | 47,911,256 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | class ProcExit(Exception):
def __init__(self, message, status):
super().__init__(message, status)
self.message = message
self.status = status
| [
"imxood@gmail.com"
] | imxood@gmail.com |
b2821e252c94e69a8e4c84bc04d5b2f793b836f8 | c10f20abec372f81dbd6468ead208543f60940f1 | /learning/22.LDA/22.3.reuters.py | 9fcc780c9a33d39ab337bd60e3a9bc6a7ec0b357 | [] | no_license | alenzhd/meachineLearning | 64876e7a6c0b8b39a63a9eb586d306a3489b4447 | 1b66ce2f73b226548f07e45c8537b8286635a048 | refs/heads/master | 2021-08-24T10:55:52.056439 | 2017-12-09T10:26:37 | 2017-12-09T10:26:37 | 112,688,163 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,864 | py | # !/usr/bin/python
# -*- coding:utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import lda
import lda.datasets
from pprint import pprint
if __name__ == "__main__":
# document-term matrix
X = lda.datasets.load_reuters()
print("type(X): {}".format(type(X)))
print("shape: {}\n".format(X.shape))
print(X[:10, :10])
# the vocab
vocab = lda.datasets.load_reuters_vocab()
print("type(vocab): {}".format(type(vocab)))
print("len(vocab): {}\n".format(len(vocab)))
print(vocab[:10])
# titles for each story
titles = lda.datasets.load_reuters_titles()
print("type(titles): {}".format(type(titles)))
print("len(titles): {}\n".format(len(titles)))
pprint(titles[:10])
print ('LDA start ----')
topic_num = 20
model = lda.LDA(n_topics=topic_num, n_iter=800, random_state=1)
model.fit(X)
# topic-word
topic_word = model.topic_word_
print("type(topic_word): {}".format(type(topic_word)))
print("shape: {}".format(topic_word.shape))
print(vocab[:5])
print(topic_word[:, :5])
# Print Topic distribution
n = 7
for i, topic_dist in enumerate(topic_word):
topic_words = np.array(vocab)[np.argsort(topic_dist)][:-(n + 1):-1]
print('*Topic {}\n- {}'.format(i, ' '.join(topic_words)))
# Document - topic
doc_topic = model.doc_topic_
print("type(doc_topic): {}".format(type(doc_topic)))
print("shape: {}".format(doc_topic.shape))
for i in range(10):
topic_most_pr = doc_topic[i].argmax()
print(u"文档: {} 主题: {} value: {}".format(i, topic_most_pr, doc_topic[i][topic_most_pr]))
mpl.rcParams['font.sans-serif'] = [u'SimHei']
mpl.rcParams['axes.unicode_minus'] = False
# Topic - word
plt.figure(figsize=(8, 9))
# f, ax = plt.subplots(5, 1, sharex=True)
for i, k in enumerate([0, 5, 9, 14, 19]):
ax = plt.subplot(5, 1, i+1)
ax.plot(topic_word[k, :], 'r-')
ax.set_xlim(-50, 4350) # [0,4258]
ax.set_ylim(0, 0.08)
ax.set_ylabel(u"概率")
ax.set_title(u"主题 {}".format(k))
plt.xlabel(u"词", fontsize=14)
plt.tight_layout()
plt.suptitle(u'主题的词分布', fontsize=18)
plt.subplots_adjust(top=0.9)
plt.show()
# Document - Topic
plt.figure(figsize=(8, 9))
# f, ax= plt.subplots(5, 1, figsize=(8, 6), sharex=True)
for i, k in enumerate([1, 3, 4, 8, 9]):
ax = plt.subplot(5, 1, i+1)
ax.stem(doc_topic[k, :], linefmt='g-', markerfmt='ro')
ax.set_xlim(-1, topic_num+1)
ax.set_ylim(0, 1)
ax.set_ylabel(u"概率")
ax.set_title(u"文档 {}".format(k))
plt.xlabel(u"主题", fontsize=14)
plt.suptitle(u'文档的主题分布', fontsize=18)
plt.tight_layout()
plt.subplots_adjust(top=0.9)
plt.show()
| [
"zhanghd@asiainfo-mixdata.com"
] | zhanghd@asiainfo-mixdata.com |
b248e867f89bea426fd4105ea1bc0119dbf2cc49 | 56bcae383daea12cc1818a19a6415e0d9b58bd0c | /month01/day10/exercise04.py | be84b5e64c084983ff26f235609fb67556cb5474 | [] | no_license | wpy-111/python | 97ede872cf6b17f8c229cee9ecfb7df25363a37a | afbd0e081763c53833617a4892d03043e644d641 | refs/heads/main | 2023-08-03T18:26:25.656984 | 2021-09-26T08:47:21 | 2021-09-26T08:47:21 | 323,897,439 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,119 | py | class Student:
def __init__(self, name, old, achement, gender):
self.name = name
self.old = old
self.achement = achement
self.gender = gender
def print_personal_info(self):
print("学生姓名:", self.name, "年龄:", self.old, "成绩:", self.achement, "性别:", self.gender)
# Sample roster used by the exercises below.
list_student = [
    Student(name, old, score, gender)
    for name, old, score, gender in (
        ("悟空", 27, 100, "男"),
        ("八戒", 30, 60, "男"),
        ("沙僧", 33, 70, "男"),
        ("唐僧", 20, 65, "女"),
    )
]
# 5. Sort the student list by age, ascending.
def ascending():
    """Sort the module-level list_student in place by ascending age."""
    # list.sort is O(n log n) and replaces the original hand-written O(n^2)
    # selection-style swap loop.  The ages here are all distinct, so the
    # stability of list.sort makes no observable difference.
    list_student.sort(key=lambda student: student.old)


ascending()
for item in list_student:
    item.print_personal_info()
# 6. Remove every male student from the list.
def del_nan():
    """Drop all students whose gender is "男", mutating list_student in place."""
    # Slice assignment keeps the same list object while filtering it.
    list_student[:] = [student for student in list_student
                       if student.gender != "男"]


del_nan()
for remaining in list_student:
    remaining.print_personal_info()
"1324749417@qq.com"
] | 1324749417@qq.com |
bf732d79c691374c2a1fbe7b8c8e18e5e53ec00e | ac2bc51f88ca0d966968d3aa28331ac715d9a1e2 | /pymeshio/pmd/reader.py | d68a767ff889e6d5f4668235e16b345b35f6905f | [] | no_license | zhouhang95/pymeshio | 5b29a864253d75bbcc23e7514ea4f9c86338031c | 256dd9146103abc3e2e300de9ae09dcde057b534 | refs/heads/master | 2020-06-14T23:30:34.727047 | 2020-04-18T14:05:04 | 2020-04-18T14:05:04 | 195,154,815 | 0 | 0 | null | 2019-07-04T02:21:55 | 2019-07-04T02:21:55 | null | UTF-8 | Python | false | false | 7,910 | py | #coding: utf-8
"""
pmd reader
"""
import io
from .. import common
from .. import pmd
class Reader(common.BinaryReader):
    """pmd reader

    Sequential reader for the binary PMD format.  Every read_* helper
    consumes bytes in exactly the order the fields appear on disk, so the
    evaluation order of the argument expressions below is load-bearing —
    do not reorder them.
    """
    def __init__(self, ios, version):
        # version is taken from the file header parsed in read()
        super(Reader, self).__init__(ios)
        self.version=version

    def read_text(self, size):
        """read cp932 text

        Reads a fixed-size field of *size* bytes and truncates it at the
        first NUL byte, if present.  Returns raw (undecoded) bytes.
        """
        src=self.unpack("%ds" % size, size)
        assert(type(src)==bytes)
        pos = src.find(b"\x00")
        if pos==-1:
            return src
        else:
            return src[:pos]

    def read_vertex(self):
        """Read one vertex record.

        In file order: two vec3, one vec2, two uint16, two uint8 —
        presumably position, normal, uv, two bone indices, bone weight and
        edge flag; confirm against pymeshio.pmd.Vertex's parameter order.
        """
        return pmd.Vertex(
                self.read_vector3(),
                self.read_vector3(),
                self.read_vector2(),
                self.read_uint(2),
                self.read_uint(2),
                self.read_uint(1),
                self.read_uint(1))

    def read_material(self):
        """Read one material record (keyword order == on-disk field order)."""
        return pmd.Material(
                diffuse_color=self.read_rgb(),
                alpha=self.read_float(),
                specular_factor=self.read_float(),
                specular_color=self.read_rgb(),
                ambient_color=self.read_rgb(),
                toon_index=self.read_int(1),
                edge_flag=self.read_uint(1),
                vertex_count=self.read_uint(4),
                texture_file=self.read_text(20)
                )

    def read_bone(self):
        """Read one bone: name, parent/tail indices, type byte, ik index, position."""
        name=self.read_text(20)
        parent_index=self.read_uint(2)
        tail_index=self.read_uint(2)
        # The inline uint8 read is the bone type passed to pmd.createBone.
        bone=pmd.createBone(name, self.read_uint(1))
        bone.parent_index=parent_index
        bone.tail_index=tail_index
        bone.ik_index = self.read_uint(2)
        bone.pos = self.read_vector3()
        return bone

    def read_ik(self):
        """Read one IK record: two uint16 indices, chain length, iterations, weight, chain."""
        ik=pmd.IK(self.read_uint(2), self.read_uint(2))
        ik.length = self.read_uint(1)
        ik.iterations = self.read_uint(2)
        ik.weight = self.read_float()
        # One uint16 bone index per chain element.
        ik.children=[self.read_uint(2) for _ in range(ik.length)]
        return ik

    def read_morph(self):
        """Read one morph: name, element count, type, then (index, offset) pairs."""
        morph=pmd.Morph(self.read_text(20))
        morph_size = self.read_uint(4)
        morph.type = self.read_uint(1)
        for j in range(morph_size):
            morph.indices.append(self.read_uint(4))
            morph.pos_list.append(self.read_vector3())
        return morph

    def read_rigidbody(self):
        """Read one rigid body (keyword order == on-disk field order)."""
        return pmd.RigidBody(
                name=self.read_text(20),
                bone_index=self.read_int(2),
                collision_group=self.read_int(1),
                no_collision_group=self.read_int(2),
                shape_type=self.read_uint(1),
                shape_size=self.read_vector3(),
                shape_position=self.read_vector3(),
                shape_rotation=self.read_vector3(),
                mass=self.read_float(),
                linear_damping=self.read_float(),
                angular_damping=self.read_float(),
                restitution=self.read_float(),
                friction=self.read_float(),
                mode=self.read_uint(1)
                )

    def read_joint(self):
        """Read one joint constraint (keyword order == on-disk field order)."""
        return pmd.Joint(
                name=self.read_text(20),
                rigidbody_index_a=self.read_uint(4),
                rigidbody_index_b=self.read_uint(4),
                position=self.read_vector3(),
                rotation=self.read_vector3(),
                translation_limit_min=self.read_vector3(),
                translation_limit_max=self.read_vector3(),
                rotation_limit_min=self.read_vector3(),
                rotation_limit_max=self.read_vector3(),
                spring_constant_translation=self.read_vector3(),
                spring_constant_rotation=self.read_vector3())
def __read(reader, model):
    """Populate *model* from *reader*, consuming PMD sections in file order.

    Returns True on success.  The core sections are mandatory; the three
    extension sections (english names, toon textures, physics) are each
    optional and preceded by an EOF check.  Statement order here mirrors
    the on-disk layout and must not be changed.
    """
    # model info
    model.name=reader.read_text(20)
    model.comment=reader.read_text(256)

    # model data: each section is a uint count followed by that many records
    model.vertices=[reader.read_vertex()
            for _ in range(reader.read_uint(4))]
    model.indices=[reader.read_uint(2)
            for _ in range(reader.read_uint(4))]
    model.materials=[reader.read_material()
            for _ in range(reader.read_uint(4))]
    model.bones=[reader.read_bone()
            for _ in range(reader.read_uint(2))]
    model.ik_list=[reader.read_ik()
            for _ in range(reader.read_uint(2))]
    model.morphs=[reader.read_morph()
            for _ in range(reader.read_uint(2))]
    model.morph_indices=[reader.read_uint(2)
            for _ in range(reader.read_uint(1))]
    model.bone_group_list=[pmd.BoneGroup(reader.read_text(50))
            for _ in range(reader.read_uint(1))]
    # Pairs read as (uint16, uint8) — presumably (bone index, group index).
    model.bone_display_list=[(reader.read_uint(2), reader.read_uint(1))
            for _i in range(reader.read_uint(4))]
    if reader.is_end():
        # EOF
        return True

    ############################################################
    # extend1: english name
    ############################################################
    # A single flag byte announces whether english names follow.
    if reader.read_uint(1)==1:
        #return True
        model.english_name=reader.read_text(20)
        model.english_comment=reader.read_text(256)
        for bone in model.bones:
            bone.english_name=reader.read_text(20)
        for morph in model.morphs:
            # The 'base' morph carries no english-name entry.
            if morph.name==b'base':
                continue
            morph.english_name=reader.read_text(20)
        for g in model.bone_group_list:
            g.english_name=reader.read_text(50)

    ############################################################
    # extend2: toon_textures
    ############################################################
    if reader.is_end():
        # EOF
        return True
    # Always exactly ten toon texture slots.
    model.toon_textures=[reader.read_text(100)
            for _ in range(10)]

    ############################################################
    # extend2: rigidbodies and joints
    ############################################################
    if reader.is_end():
        # EOF
        return True
    model.rigidbodies=[reader.read_rigidbody()
            for _ in range(reader.read_uint(4))]
    model.joints=[reader.read_joint()
            for _ in range(reader.read_uint(4))]
    return True
def read_from_file(path):
    """
    Read a PMD model from *path* and return the pymeshio.pmd.Model.

    :Parameters:
        path
            file path

    >>> import pymeshio.pmd.reader
    >>> m=pymeshio.pmd.reader.read_from_file('resources/初音ミクVer2.pmd')
    >>> print(m)
    <pmd-2.0 "Miku Hatsune" 12354vertices>
    """
    # Parse from an in-memory copy of the file, then remember its origin.
    # (Local renamed from `pmd`, which shadowed the module import.)
    model = read(io.BytesIO(common.readall(path)))
    model.path = path
    return model
def read(ios: io.IOBase):
    """
    Parse a PMD document from *ios* and return the pymeshio.pmd.Model.

    :Parameters:
        ios
            input stream (in io.IOBase)

    >>> import pymeshio.pmd.reader
    >>> m=pymeshio.pmd.reader.read(io.open('resources/初音ミクVer2.pmd', 'rb'))
    >>> print(m)
    <pmd-2.0 "Miku Hatsune" 12354vertices>
    """
    assert isinstance(ios, io.IOBase)
    binary = common.BinaryReader(ios)

    # Header: 3-byte magic followed by a float version number.
    magic = binary.unpack("3s", 3)
    if magic != b"Pmd":
        raise common.ParseException(
                "invalid signature: {0}".format(magic))
    version = binary.read_float()

    model = pmd.Model(version)
    reader = Reader(binary.ios, version)
    if __read(reader, model):
        if not reader.is_end():
            # Trailing bytes after the last known section are tolerated.
            pass

        # Wire up the bone hierarchy.
        for index, bone in enumerate(model.bones):
            bone.index = index
            if bone.parent_index == 0xFFFF:
                # 0xFFFF marks a root bone.
                bone.parent = None
                model.no_parent_bones.append(bone)
            else:
                parent = model.bones[bone.parent_index]
                bone.parent = parent
                parent.children.append(bone)
            # Tail position (後位置).
            if bone.hasChild():
                bone.tail = model.bones[bone.tail_index].pos
    return model
| [
"ousttrue@gmail.com"
] | ousttrue@gmail.com |
1fa6918b789095c70ac0a9b29a5bf35351e768ff | 45de3aa97525713e3a452c18dcabe61ac9cf0877 | /src/bases/anaconf/fichier_configuration.py | 3c393fe67c215a743548a07ae83628cef8b5bacf | [
"BSD-3-Clause"
] | permissive | stormi/tsunami | 95a6da188eadea3620c70f7028f32806ee2ec0d1 | bdc853229834b52b2ee8ed54a3161a1a3133d926 | refs/heads/master | 2020-12-26T04:27:13.578652 | 2015-11-17T21:32:38 | 2015-11-17T21:32:38 | 25,606,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,208 | py | # -*-coding:Utf-8 -*
# Copyright (c) 2010 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Ce fichier décrit la classe FichierConfiguration, détaillée plus bas."""
import re
import textwrap
from .exceptions import *
class FichierConfiguration:
"""Cette classe définit un fichier de configuration.
Le fichier créé par cette classe est déjà ouvert. La classe se contente
de l'analyser et de placer les données dans un dictionnaire.
Elle est également en charge de mettre un jour un fichier en tenant
compte d'un autre fichier (mettre à jour un modèle en tenant compte
des données configurées, ici).
"""
def __init__(self, nom, chaine, logger):
"""Constructeur d'un fichier de configuration.
On lui passe la chaîne lue dans le fichier, non analysée.
Cette chaîne contient donc les données brutes, il faut l'analyser.
"""
self.nom = nom
self.fichier = chaine
self.donnees = {}
self.lignes = {}
self.logger = logger
# On analyse la chaîne
t_contenu = chaine.split("\n")
contenu = chaine
delimiteurs = ('\\', ',', '[', '{', '(')
# On lit les données
i = 0
while i < len(t_contenu):
ligne = t_contenu[i]
if ligne.strip() == "":
i += 1
continue
elif ligne.lstrip().startswith("#"):
i += 1
continue
elif "=" not in ligne:
self.logger.warning("[{}:{}]: le signe '=' n'a pas été " \
"trouvé ('{}')".format(self.nom, i + 1, ligne))
i += 1
else:
nom_donnee = ligne.split("=")[0].strip()
donnee = "=".join(ligne.split("=")[1:]).lstrip()
# Si la ligne se poursuit, on continue
ligne_debut = i
while ligne.rstrip()[-1] in delimiteurs or \
ligne.lstrip().startswith("#"):
i += 1
if i >= len(t_contenu):
break
ligne = t_contenu[i]
donnee += "\n" + ligne
ligne_fin = i
self.lignes[nom_donnee] = (ligne_debut, ligne_fin)
self.donnees[nom_donnee] = donnee
i += 1
def mettre_a_jour(self, autre_fichier):
"""Met à jour l'attribut 'chaine' en fonction d'un autre fichier.
On parcourt les données de cet autre fichier.
* Si la donnée est présente dans self.donnees, on la réécrit
sans savoir si elle est identique ou non, on l'écrase)
* Sinon on ne la réécrit pas.
"""
t_contenu = self.fichier.split("\n")
for nom_don, val_don in autre_fichier.donnees.items():
if nom_don in self.donnees.keys(): # la donnée existe
# On la met à jour
self.donnees[nom_don] = val_don
if nom_don not in self.lignes:
# La donnée n'a pas été trouvée
raise ErreurInterpretation("la donnée {} n'a pas " \
"été trouvée dans le fichier à mettre à " \
"jour".format(nom_don))
debut, fin = self.lignes[nom_don]
nv_val = nom_don + " = " + val_don
nv_val = nv_val.split("\n")
t_contenu = t_contenu[:debut] + nv_val + t_contenu[fin + 1:]
self.fichier = "\n".join(t_contenu)
| [
"kredh@free.fr"
] | kredh@free.fr |
7f5bf99bf96136efc47b1ad93ed0722deb7c4c24 | 663365d4c1c4068dab79a4b24cf6c96888b0862d | /Functions/migrations/0021_contribute_upload_file.py | 173945ddf3192fbd6275812a40ffb533ccbbbafa | [] | no_license | gitanjali1077/UnistashFinal | e7052b26db70d3ed728f7cddd90da31f6d4f0a50 | 31251e441c8759ca3d6c4b0cb274902293fd38a7 | refs/heads/master | 2021-07-02T07:01:10.919891 | 2017-09-23T11:55:03 | 2017-09-23T11:55:03 | 104,563,813 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-08-28 18:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Functions', '0020_contribute'),
]
operations = [
migrations.AddField(
model_name='contribute',
name='upload_file',
field=models.FileField(default='abc.jpg', upload_to='documents/'),
preserve_default=False,
),
]
| [
"gitanjali1077@gmail.com"
] | gitanjali1077@gmail.com |
c705ad27d574a1c68f53e7ea69f8e302c6e3bd45 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03360/s908615245.py | 1bd71f84ec89bec0620034eb77b6014bd7f9078f | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | lis = list(map(int,input().split()))
a = int(input())
lis.sort(reverse = True)
for i in range(a):
lis[0] *= 2
print(sum(lis)) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
396afefec4925ee05474afb55fca1e1c01268612 | 627fda699b0dc401a19b0cbbc17ea6632f315c8c | /baltimore.py | 123f209bc531b868c7a5fed241650c20e9899e8a | [] | no_license | Romulus83/python | 31db444e766ceff39813162f3e64edbb53bfdfdf | f5dc170ccd2b98f84a17f8bd8d8f5d8bd9a51f60 | refs/heads/master | 2023-02-16T16:07:18.947227 | 2021-01-16T17:08:40 | 2021-01-16T17:08:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,175 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 3 18:14:48 2020
@author: user
"""
"""
remove the dollar signs in the AnnualSalary field and assign it as a int
"""
import pandas as pd
df4 = pd.read_csv("Baltimore_City_Employee_Salaries_FY2014.csv")
df4["AnnualSalary"] = df4["AnnualSalary"].astype("int64")
"""
Group the data on JobTitle and AnnualSalary, and aggregate with sum, mean, etc.
Sort the data and display to show who get the highest salary
"""
df4[["AnnualSalary","JobTitle"]]
df4["AnnualSalary"].agg(['sum','mean'])
a = sorted(df4["AnnualSalary"])
df4["AnnualSalary"].max()
df4["AnnualSalary"].min()
"""
Try to group on JobTitle only and sort the data and display
"""
sorted(df4["JobTitle"])
"""
How many employess are there for each JobRoles and Graph it
"""
import matplotlib.pyplot as plt
plt.pie(df4["JobTitle"].value_counts(),labels = df4["JobTitle"].unique(),autopct = "%.2f",radius = 3)
"""
Graph and show which Job Title spends the most
"""
import matplotlib.pyplot as plt
plt.pie(df4["HireDate"].value_counts(dropna = False),labels = df4["HireDate"].unique(),autopct = "%.2f",radius = 3)
| [
"sandeepjain20178@gmail.com"
] | sandeepjain20178@gmail.com |
ffaa5f02e39b29398daa68fa9fb34f9b4ddb956e | 9b11e49cbb9120f3f7e69a8884c0cee42896566d | /Hyperbola_search_part1.py | 7c39171ec6408d181e5a721ab55f1b089259ad66 | [] | no_license | albusdemens/Backscattering_3DND | e6966cd8c39342181183de20028b959227a5f570 | f911f1f9f9bf863daffe2bcc85bd7d7a94e5b3c7 | refs/heads/master | 2021-01-21T13:53:17.469879 | 2016-06-27T10:51:45 | 2016-06-27T10:51:45 | 44,485,363 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,527 | py | # Alberto Cereser, 4 Feb 2014
# alcer@fysik.dtu.dk, Technical University of Denmark
# For each point, this script looks for its successor
# For the location of the cutouts, the code follows the approach described in
# http://scikit-image.org/docs/dev/auto_examples/plot_template.html
import sys
#import pandas as pd
import numpy as np
from numpy import ndarray
import math
import matplotlib.pyplot as plt
from matplotlib.pyplot import *
from skimage import data
from skimage.feature import match_template
import os
if __name__ == '__main__':
# I start reading in the list of diffraction spots data made using Peak_analyzer_simple_neighbours.py
input_filename = "Fe_PSI_spt_refined.txt"
input_file = open(input_filename, 'r')
# Here are the variables to change, depending on the number of projections considered
# and on the lines of the txt file (Fe_PSI_spt_refined.txt)
Number_of_projections = 181
Number_of_lines_in_txt = 3493
counter_array = []
correlation_threshold_value = 0.7
# I make a square mask, so to ignore the central black region
X_mask = 1135
Y_mask = 1251
output_filename = ("/Users/Alberto/Documents/Data_analysis/ICON_Aug2013/Data_analysis/Python_code/Fe_PSI_spt_tagged_15apr.txt")
for i in range(2, (Number_of_projections + 1)):
cmd = 'python Hyperbola_search_part2.py %i %s %s %i %i %f' % (i, input_filename, output_filename, Number_of_projections, Number_of_lines_in_txt, correlation_threshold_value)
os.system(cmd)
# filename_cutouts_combined = ("/Users/Alberto/Documents/Data_analysis/ICON_Aug2013/Data_analysis/cutouts_combined/cutouts_combined_%03i.txt" % (i))
# filename_cutouts_combined_tag = ("/Users/Alberto/Documents/Data_analysis/ICON_Aug2013/Data_analysis/cutouts_combined/cutouts_combined_tag_%03i.txt" % (i))
# image = np.loadtxt(filename_cutouts_combined)
# image_tagged = np.loadtxt(filename_cutouts_combined_tag)
# for line in input_file:
# line_elements = line.split()
# Angle_number = int(line_elements[0])
# Omega = float(line_elements[1])
# Intensity = float(line_elements[2])
# X = float(line_elements[3])
# Y = float(line_elements[4])
# Address = str(line_elements[5])
# ID = int(line_elements[6])
# print i, Angle_number
#if ((Angle_number + 1) == i): # we map cutouts in the following
#cutout = np.loadtxt(Address)
#index = Angle_number #+ 1
#array_cutout = np.array(cutout)
#array_image = np.array(image)
#correlation = match_template(image, cutout)
#ij = np.unravel_index(np.argmax(correlation), correlation.shape)
#x, y = ij[::-1]
#ran = array_cutout.shape
#ran_y = ran[1]
#ran_x = ran[0]
#x_center = x + (ran_x/2)
#y_center = y + (ran_y/2)
#print x_center, y_center
# To do: insert case when spot in central square
# We now calculate the distance between the cutout center (in Omega) and the point we found
# (in Omega + 1)
#distance = math.sqrt((x_center - X)**2 + (y_center - Y)**2)
#print i
#if distance < 200: # We search that the two points are not too far away
# if (np.amax(correlation) > correlation_threshold_value):
# # We need now to find the cutout which is closer to the point where we located
# # The center of the cutout
# tag = image_tagged[y_center, x_center]
# else:
# tag = 0
# print distance, tag
#f = open(output_file, "a+")
#f.write("%i %f %f %f %f %s %f %i %i\n" % (Angle_number, Omega, Intensity, X, Y, Address, np.amax(correlation), ID, int(tag)))
#f.close
input_file.close() | [
"mannaro85@gmail.com"
] | mannaro85@gmail.com |
1568ebac3d05d96cad143aed5faa28b55aed2fbf | 6b6e20004b46165595f35b5789e7426d5289ea48 | /data/archivedlogs.py | 0172c74c8c3e951453a1aadc5a5a85a31982dc97 | [
"Apache-2.0"
] | permissive | anwarchk/quay | 2a83d0ab65aff6a1120fbf3a45dd72f42211633b | 23c5120790c619174e7d36784ca5aab7f4eece5c | refs/heads/master | 2020-09-12T18:53:21.093606 | 2019-11-15T19:29:02 | 2019-11-15T19:29:02 | 222,517,145 | 0 | 0 | Apache-2.0 | 2019-11-18T18:32:35 | 2019-11-18T18:32:35 | null | UTF-8 | Python | false | false | 1,038 | py | import logging
from util.registry.gzipinputstream import GzipInputStream
from flask import send_file, abort
from data.userfiles import DelegateUserfiles, UserfilesHandlers
JSON_MIMETYPE = 'application/json'
logger = logging.getLogger(__name__)
class LogArchive(object):
def __init__(self, app=None, distributed_storage=None):
self.app = app
if app is not None:
self.state = self.init_app(app, distributed_storage)
else:
self.state = None
def init_app(self, app, distributed_storage):
location = app.config.get('LOG_ARCHIVE_LOCATION')
path = app.config.get('LOG_ARCHIVE_PATH', None)
handler_name = 'web.logarchive'
log_archive = DelegateUserfiles(app, distributed_storage, location, path,
handler_name=handler_name)
# register extension with app
app.extensions = getattr(app, 'extensions', {})
app.extensions['log_archive'] = log_archive
return log_archive
def __getattr__(self, name):
return getattr(self.state, name, None)
| [
"jimmy.zelinskie+git@gmail.com"
] | jimmy.zelinskie+git@gmail.com |
5b47ee2fdfef62a928d08f196124943124e29eaf | f0c402d3858f0643561886797578b1e64655b1b3 | /py/riscv/exception_handlers/EnvironmentCallHandler.py | f7d7320626d9fcd3c808a7a8d10bd79f12fbd112 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | Leo-Wang-JL/force-riscv | 39ad2a72abd814df4b63879ce9825b6b06a9391a | deee6acaaee092eb90ac2538de122303334e5be3 | refs/heads/master | 2023-01-28T00:06:58.135651 | 2020-11-18T02:54:10 | 2020-11-18T02:54:10 | 271,873,013 | 0 | 0 | NOASSERTION | 2020-06-28T00:51:26 | 2020-06-12T19:15:26 | C++ | UTF-8 | Python | false | false | 7,073 | py | #
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
# FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from base.exception_handlers.ReusableSequence import ReusableSequence
from riscv.PrivilegeLevel import PrivilegeLevelRISCV
from riscv.exception_handlers.ExceptionHandlerContext import RegisterCallRole
class EnvironmentCallHandlerRISCV(ReusableSequence):
def __init__(self, aGenThread, aFactory, aStack):
super().__init__(aGenThread, aFactory, aStack)
self.mDataBlockAddrRegIndex = None
self.mActionCodeRegIndex = None
def generateHandler(self, **kwargs):
try:
handler_context = kwargs['handler_context']
except KeyError:
self.error('INTERNAL ERROR: one or more arguments to EnvironmentCallHandlerRISCV generate method missing.')
self.debug('[EnvironmentCallHandlerRISCV] generate handler address: 0x%x' % self.getPEstate('PC'))
self.mAssemblyHelper.clearLabels('EnvironmentCallHandlerRISCV')
(_, self.mActionCodeRegIndex) = handler_context.getScratchRegisterIndices(RegisterCallRole.ARGUMENT, 2)
priv_level_reg_index = handler_context.getScratchRegisterIndices(RegisterCallRole.PRIV_LEVEL_VALUE)
scratch_reg_index = handler_context.getScratchRegisterIndices(RegisterCallRole.TEMPORARY, 1)
# Action Code 1: Return to S Mode
self.mAssemblyHelper.genMoveImmediate(scratch_reg_index, 1)
self.mAssemblyHelper.genConditionalBranchToLabel(self.mActionCodeRegIndex, scratch_reg_index, 8, 'EQ', 'RETURN_TO_S_MODE')
# Action Code 2: Load From Data Block
self.mAssemblyHelper.genMoveImmediate(scratch_reg_index, 2)
self.mAssemblyHelper.genConditionalBranchToLabel(self.mActionCodeRegIndex, scratch_reg_index, 48, 'EQ', 'LOAD_FROM_DATA_BLOCK')
# All other action codes: Skip instruction and return
self.mAssemblyHelper.genRelativeBranchToLabel(78, 'SKIP_INSTRUCTION')
self.mAssemblyHelper.addLabel('RETURN_TO_S_MODE')
self._genReturnToSMode(handler_context)
self.mAssemblyHelper.addLabel('LOAD_FROM_DATA_BLOCK')
self._genLoadRegistersFromDataBlock(handler_context)
self.mAssemblyHelper.addLabel('SKIP_INSTRUCTION')
self.mAssemblyHelper.genIncrementExceptionReturnAddress(scratch_reg_index, priv_level_reg_index)
self.mAssemblyHelper.addLabel('RETURN')
self.mAssemblyHelper.genReturn()
## Generate instructions to return to S Mode using the first data block entry as the return
# address.
#
# @param aHandlerContext The exception handler context from which register indices can be
# retrieved by role.
def _genReturnToSMode(self, aHandlerContext):
(self.mDataBlockAddrRegIndex, _) = aHandlerContext.getScratchRegisterIndices(RegisterCallRole.ARGUMENT, 2)
priv_level_reg_index = aHandlerContext.getScratchRegisterIndices(RegisterCallRole.PRIV_LEVEL_VALUE)
(scratch_reg_index, xstatus_reg_index, inverse_mask_reg_index) = aHandlerContext.getScratchRegisterIndices(RegisterCallRole.TEMPORARY, 3)
for priv_level in self.mAssemblyHelper.genPrivilegeLevelInstructions(aPrivLevels=tuple(PrivilegeLevelRISCV)[1:], aInstrCountPerLevel=9, aScratchRegIndex=scratch_reg_index, aPrivLevelRegIndex=priv_level_reg_index):
self.mAssemblyHelper.genReadSystemRegister(xstatus_reg_index, ('%sstatus' % priv_level.name.lower()))
self.mAssemblyHelper.genMoveImmediate(scratch_reg_index, 1)
if priv_level == PrivilegeLevelRISCV.S:
self.mAssemblyHelper.genShiftLeftImmediate(scratch_reg_index, 8)
elif priv_level == PrivilegeLevelRISCV.M:
self.mAssemblyHelper.genShiftLeftImmediate(scratch_reg_index, 11)
self.mAssemblyHelper.genNotRegister(inverse_mask_reg_index, aSrcRegIndex=scratch_reg_index)
self.mAssemblyHelper.genAndRegister(xstatus_reg_index, inverse_mask_reg_index)
self.mAssemblyHelper.genOrRegister(xstatus_reg_index, scratch_reg_index)
self.mAssemblyHelper.genWriteSystemRegister(('%sstatus' % priv_level.name.lower()), xstatus_reg_index)
self.genInstruction('LD##RISCV', {'rd': scratch_reg_index, 'rs1': self.mDataBlockAddrRegIndex, 'simm12': 0, 'NoRestriction': 1})
self.mAssemblyHelper.genWriteSystemRegister(('%sepc' % priv_level.name.lower()), scratch_reg_index)
self.mAssemblyHelper.genRelativeBranchToLabel(52, 'RETURN')
## Generate instructions to load CSRs using values from the data block.
#
# @param aHandlerContext The exception handler context from which register indices can be
# retrieved by role.
def _genLoadRegistersFromDataBlock(self, aHandlerContext):
# The data block should hold values for the following sequence of registers: xstatus, xepc,
# satp, action code register, data block address register
(self.mDataBlockAddrRegIndex, _) = aHandlerContext.getScratchRegisterIndices(RegisterCallRole.ARGUMENT, 2)
priv_level_reg_index = aHandlerContext.getScratchRegisterIndices(RegisterCallRole.PRIV_LEVEL_VALUE)
scratch_reg_index = aHandlerContext.getScratchRegisterIndices(RegisterCallRole.TEMPORARY, 1)
for priv_level in self.mAssemblyHelper.genPrivilegeLevelInstructions(aPrivLevels=tuple(PrivilegeLevelRISCV)[1:], aInstrCountPerLevel=4, aScratchRegIndex=scratch_reg_index, aPrivLevelRegIndex=priv_level_reg_index):
self.genInstruction('LD##RISCV', {'rd': scratch_reg_index, 'rs1': self.mDataBlockAddrRegIndex, 'simm12': 0, 'NoRestriction': 1})
self.mAssemblyHelper.genWriteSystemRegister(('%sstatus' % priv_level.name.lower()), scratch_reg_index)
self.genInstruction('LD##RISCV', {'rd': scratch_reg_index, 'rs1': self.mDataBlockAddrRegIndex, 'simm12': 8, 'NoRestriction': 1})
self.mAssemblyHelper.genWriteSystemRegister(('%sepc' % priv_level.name.lower()), scratch_reg_index)
self.genInstruction('LD##RISCV', {'rd': scratch_reg_index, 'rs1': self.mDataBlockAddrRegIndex, 'simm12': 16, 'NoRestriction': 1})
self.mAssemblyHelper.genWriteSystemRegister('satp', scratch_reg_index)
self.genInstruction('LD##RISCV', {'rd': self.mActionCodeRegIndex, 'rs1': self.mDataBlockAddrRegIndex, 'simm12': 24, 'NoRestriction': 1})
self.genInstruction('LD##RISCV', {'rd': self.mDataBlockAddrRegIndex, 'rs1': self.mDataBlockAddrRegIndex, 'simm12': 32, 'NoRestriction': 1})
self.mAssemblyHelper.genRelativeBranchToLabel(20, 'RETURN')
| [
"jwang1@futurewei.com"
] | jwang1@futurewei.com |
e00fbc256f3ed06f3244641137c2c4a0e5b73e33 | 05d692469305dd1adb9ebc46080525bb4515b424 | /Exception handling/aritherror2.py | c75466829af775917e307f139c966ffebef05188 | [] | no_license | rajdharmkar/pythoncode | 979805bc0e672f123ca1460644a4bd71d7854fd5 | 15b758d373f27da5680a711bf12c07e86758c447 | refs/heads/master | 2020-08-07T18:30:55.575632 | 2019-10-14T12:46:09 | 2019-10-14T12:46:09 | 213,551,766 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | try:
a = 0 / 19
print a
except ArithmeticError:
print "This statement is raising an exception"
else:
print "Welcome"
#syntax
#
# try:
# #run code
# except exception/error name1:
# #run code
# except exception error name2:
# # run code
# else:
# # run code | [
"rajdharmkar@gmail.com"
] | rajdharmkar@gmail.com |
98cd0f98537d4de5abe64aee34a9cc391d8459f8 | 106ddccf8f19ca2dcdde9bc455a230f144222493 | /remoview/settings.py | 63e6d00fb8b857afbe3a1a4d5facf7637551f70d | [] | no_license | Simeon2001/dsc-backend-project | b7cea249bf0855af53fd1e189371474bfeeec590 | 96069df96c22973ce00ace9d043475ff326086ab | refs/heads/main | 2023-01-09T08:57:04.846997 | 2020-11-12T16:38:16 | 2020-11-12T16:38:16 | 312,234,502 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,352 | py | """
Django settings for remoview project.
Generated by 'django-admin startproject' using Django 3.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'fetw8yeqk_m&738-5s^#3+h2x*!1yag@%8&9xw3l)1^9b*o(&)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1','removieww.herokuapp.com']
# Application definition
INSTALLED_APPS = [
'movie',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'remoview.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'remoview.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'movie/static'),
)
if os.getcwd()=='/app':
DEBUG=False | [
"jesusanyasimeon@gmail.com"
] | jesusanyasimeon@gmail.com |
4bb6112541cc85b424d4faf0558bc75faaa26289 | 670f4ba8ded99b420c3454c6ae35789667880cc8 | /tobiko/openstack/openstackclient/_port.py | b08f02ffe1daafec91e5b874d0db5f8185c53ddc | [
"Apache-2.0"
] | permissive | FedericoRessi/tobiko | 892db522198ab48380892138459d801c4bd00efa | ce2a8734f8b4203ec38078207297062263c49f6f | refs/heads/master | 2022-07-26T22:52:10.273883 | 2022-07-20T20:04:43 | 2022-07-20T20:04:43 | 145,856,925 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,725 | py | # Copyright (c) 2020 Red Hat, Inc.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from tobiko.openstack.openstackclient import _client
def port_list(*args, **kwargs):
cmd = 'openstack port list {params}'
kwargs['format'] = 'json'
return _client.execute(cmd, *args, **kwargs)
def port_show(port, *args, **kwargs):
cmd = f'openstack port show {{params}} {port}'
kwargs['format'] = 'json'
return _client.execute(cmd, *args, **kwargs)
def port_create(port_name, network_name, *args, **kwargs):
cmd = f'openstack port create {{params}} --network {network_name} '\
f'{port_name}'
kwargs['format'] = 'json'
return _client.execute(cmd, *args, **kwargs)
def port_delete(ports, *args, **kwargs):
cmd = f'openstack port delete {{params}} {" ".join(ports)}'
return _client.execute(cmd, *args, **kwargs)
def port_set(port, *args, **kwargs):
cmd = f'openstack port set {{params}} {port}'
return _client.execute(cmd, *args, **kwargs)
def port_unset(port, *args, **kwargs):
cmd = f'openstack port unset {{params}} {port}'
return _client.execute(cmd, *args, **kwargs)
| [
"fressi@redhat.com"
] | fressi@redhat.com |
d124e275a16bc01dc86309b27e0b3fe2746089f4 | 9aa5c69ec4ae4c1a54028f6add3327929d81b929 | /nodes/camnode | c8c7ba947819137cd6d9dda5703fa04f085ab012 | [] | no_license | strawlab/ros_flydra | 818fed6cdc9ed8afd17950ea5de7595e91a0483e | 0a922c24235d971b665461db6151f5867eee5870 | refs/heads/master | 2021-01-01T16:06:00.097118 | 2018-01-19T16:52:20 | 2018-01-19T16:52:20 | 4,662,078 | 0 | 2 | null | 2015-04-17T11:46:43 | 2012-06-14T10:40:47 | null | UTF-8 | Python | false | false | 635 | #!/usr/bin/env python
import threading
try:
import flydra.camnode
except ImportError:
import sys
import os.path
sys.path.insert(0, os.path.expanduser("~/flydra.git"))
import flydra.camnode
import roslib; roslib.load_manifest('rospy')
import rospy
def main():
rospy.init_node('flydra_camnode')
spinthread = threading.Thread(target=rospy.spin)
spinthread.setDaemon(True)
spinthread.start()
flydra.camnode.main(
rospy_init_node=False, #we have already done that
cmdline_args=rospy.myargv()[1:]
)
rospy.signal_shutdown("quit")
if __name__ == '__main__':
main()
| [
"john.stowers@gmail.com"
] | john.stowers@gmail.com | |
15713a4aa17b0af607f4967edc241d1f1688c313 | b2403817f9221ee3550130572a808194ef4f3fda | /OOP/Polymorphism/SuperMethod.py | 660f0501f83896f9d29e77f3e7dd6ac1d1370e5a | [] | no_license | xaviergoby/Python-Data-Structure | e962444ef5b1313c3facbf1fcc315af182b73a26 | eaaf31ea98d63e812a75c1d6ecb8722b9c0cf142 | refs/heads/master | 2020-04-13T00:24:40.896592 | 2018-11-27T11:51:36 | 2018-11-27T11:51:36 | 162,844,732 | 1 | 0 | null | 2018-12-22T21:46:29 | 2018-12-22T21:46:29 | null | UTF-8 | Python | false | false | 873 | py | class SomeBaseClass(object):
def __init__(self):
print('SomeBaseClass.__init__(self) called')
class UnsuperChild(SomeBaseClass):
def __init__(self):
print('Child.__init__(self) called')
SomeBaseClass.__init__(self)
class SuperChild(SomeBaseClass):
def __init__(self):
print('SuperChild.__init__(self) called')
super(SuperChild, self).__init__()
s = SuperChild()
print s
u = UnsuperChild()
# print "**"
print u
# print "****"
class InjectMe(SomeBaseClass):
def __init__(self):
print('InjectMe.__init__(self) called')
super(InjectMe, self).__init__()
class UnsuperInjector(UnsuperChild, InjectMe): pass
class SuperInjector(SuperChild, InjectMe): pass
print "-----------------"
x = SuperInjector()
#x.mro
y = UnsuperInjector()
print "MRO.."
print SuperInjector.mro()
print UnsuperInjector.mro() | [
"sanjay.siddha3@gmail.com"
] | sanjay.siddha3@gmail.com |
694bd1b545d6e74fa955877bac704efbbedcc3d4 | 141b42d9d72636c869ff2ce7a2a9f7b9b24f508b | /myvenv/Lib/site-packages/phonenumbers/shortdata/region_JO.py | 10652f5720029f060d27895417cf44fbe7fee355 | [
"BSD-3-Clause"
] | permissive | Fa67/saleor-shop | 105e1147e60396ddab6f006337436dcbf18e8fe1 | 76110349162c54c8bfcae61983bb59ba8fb0f778 | refs/heads/master | 2021-06-08T23:51:12.251457 | 2018-07-24T08:14:33 | 2018-07-24T08:14:33 | 168,561,915 | 1 | 0 | BSD-3-Clause | 2021-04-18T07:59:12 | 2019-01-31T17:00:39 | Python | UTF-8 | Python | false | false | 860 | py | """Auto-generated file, do not edit by hand. JO metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_JO = PhoneMetadata(id='JO', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='[19]\\d{2,4}', possible_length=(3, 5)),
emergency=PhoneNumberDesc(national_number_pattern='1(?:12|9[127])|911', example_number='112', possible_length=(3,)),
short_code=PhoneNumberDesc(national_number_pattern='1(?:09|1[0-2]|9[0-24-79])|9(?:0903|11|8788)', example_number='111', possible_length=(3, 5)),
carrier_specific=PhoneNumberDesc(national_number_pattern='9(?:0903|8788)', example_number='90903', possible_length=(5,)),
sms_services=PhoneNumberDesc(national_number_pattern='9(?:0903|8788)', example_number='90903', possible_length=(5,)),
short_data=True)
| [
"gruzdevasch@gmail.com"
] | gruzdevasch@gmail.com |
d7764b7bc2f3043d08cfc2717d91e64ea6735c41 | 321b4ed83b6874eeb512027eaa0b17b0daf3c289 | /142/142.linked-list-cycle-ii.234403149.Accepted.leetcode.py | f353efaede5f020953c36a7ea4c336d0ce14af66 | [] | no_license | huangyingw/submissions | 7a610613bdb03f1223cdec5f6ccc4391149ca618 | bfac1238ecef8b03e54842b852f6fec111abedfa | refs/heads/master | 2023-07-25T09:56:46.814504 | 2023-07-16T07:38:36 | 2023-07-16T07:38:36 | 143,352,065 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | class Solution(object):
def detectCycle(self, head):
if not head or not head.next:
return None
q1 = head
q2 = head.next
while q1 != q2:
if not q2 or not q2.next:
return None
q1 = q1.next
q2 = q2.next.next
res = head
q1 = q1.next
while res != q1:
res = res.next
q1 = q1.next
return res
| [
"huangyingw@gmail.com"
] | huangyingw@gmail.com |
a542c28168e1d849dfb3e5a94a62c3bf549828d5 | 92b8b1b0914a1bb4f6571bf7c3597ac33dbc58aa | /MLG-Crypto_90/solution.py | ad3cf3c7935532127e6bd9a25f91be9243680449 | [] | no_license | leogemetric/cryptoctf-2016 | 0cb5e5179125d88f8ad14bc3c29ff5aeaeac7787 | 69b02f0bbfa941d04a2f9af101d1420a096f2437 | refs/heads/master | 2021-01-19T07:18:53.908002 | 2016-05-27T22:33:02 | 2016-05-27T22:33:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 874 | py | lines = open("mlg_crypto.txt", "r").readlines()[1:-1]
subs = {}
for line in lines:
line = line.strip()
for word in line.split("_"):
if word in subs:
subs[word] += 1
else:
subs[word] = 1
print len(subs)
print subs
space = max(subs, key=lambda x: subs[x])
del subs[space]
total = "\n".join(lines)
total = total.replace(space, " ") # Most common character is " "
# Do some bs substitutions
alphabet = "abcdefghijklmnopqrstuvwxyz"
i = 0
for sub in subs:
total = total.replace(sub, alphabet[i])
i += 1
total = total.replace("_", "").replace("\n\n", "\n")
print total
# Given the hint, we need to crack the substitution cipher on the ciphertext.
# This script will assign each word a letter and print it out
# Plugging the output into quipqiup, we kinda decode the message and the flag:
# flag{leet_smoked_memes_bro}
| [
"jameswang9909@hotmail.com"
] | jameswang9909@hotmail.com |
9d7f40b1e1cb4a8d9e542a673113d94d0418e724 | c43fbcb4442428e85616f664964d1e27ca396070 | /runs/malte/snr_study/simparamsec.py | 7a5a04938a59f165f1b98855287c2f20abe47592 | [] | no_license | megalut/megalut | ddac89a0dca70e13979d31b80d52233226233ade | 63bd4bec8000ad13f4963d464d7b7b4d470a36ab | refs/heads/master | 2020-04-15T00:33:42.815988 | 2018-09-11T08:45:48 | 2018-09-11T08:45:48 | 20,882,727 | 2 | 1 | null | 2018-09-11T08:45:49 | 2014-06-16T11:39:14 | Python | UTF-8 | Python | false | false | 3,043 | py | import megalut.sim
import numpy as np
import random # np.random.choice is only available for newer numpys...
import itertools
class Simple1(megalut.sim.params.Params):
"""
No PSF, just round Gaussians, but with Euclid zeropoint and sky level etc.
"""
def __init__(self):
megalut.sim.params.Params.__init__(self)
def stat(self):
"""
stat: called for each catalog (stat is for stationnary)
"""
return {"snc_type":1}
def draw(self, ix, iy, nx, ny):
"""
draw: called for each galaxy
"""
######### No Lensing
tru_s1 = 0.0
tru_s2 = 0.0
tru_mu = 1.0
# Params
gain = 3.1 # electrons/ADU
ron = 4.2 # electrons
skyback = 22.35 # mag per arcsec2, dominated by zodiacal light
# Don't look at sextractor outptu if you do this!
zeropoint = 24.0 + float(ny - iy)/float(ny) * 1.0 # mag. Should give SNR 10 when observing with 3 x 565 second exposures.
#zeropoint = 24.9
exptime = 3.0*565.0 # seconds
########## Noise ##########
tru_sky_level = 0.01 * (exptime/gain) * 10**(-0.4*(skyback - zeropoint)) # In ADU per pixel. 0.01 because of the pixel size of 0.1 arcsec.
tru_gain = gain
tru_read_noise = ron
######### No Lensing
tru_s1 = 0.0
tru_s2 = 0.0
tru_mu = 1.0
########## Galaxy ##########
tru_type = 0 # 0 Gaussian, 1 sersic
tru_mag = 24.5
#tru_mag = 23.0 + float(ny - iy)/float(ny) * 2.5
tru_flux = (exptime / gain) * 10**((tru_mag - zeropoint)/(-2.5))
tru_rad = 4.3/2.0
# Croppers reference galaxy has an extension of 4.3 pixels, but we don't know exactly what this extension means.
size_factor = 1.0 # scales the galaxy with respect to Croppers reference
tru_sigma = size_factor * tru_rad / 1.1774 # We take Croppers "extension of the source" as the half-light-diameter
tru_cropper_snr = (tru_flux) / np.sqrt( np.pi * (size_factor * 13.0/2.0)**2 * tru_sky_level) # For a sky-limited obs, we don't use the gain here
tru_g = 0.0
tru_theta = 0.0
(tru_g1, tru_g2) = (tru_g * np.cos(2.0 * tru_theta), tru_g * np.sin(2.0 * tru_theta))
tru_g = np.hypot(tru_g1, tru_g2)
return { # It's ugly to not directly fill this dict, but this makes it clearer what is actually returned:
"tru_type":tru_type,
"tru_flux":tru_flux,
"tru_mag":tru_mag,
"zeropoint":zeropoint,
"skyback":skyback,
"tru_rad":tru_rad,
"tru_sigma":tru_sigma,
#"tru_sersicn":tru_sersicn,
"tru_g1":tru_g1,
"tru_g2":tru_g2,
"tru_g":tru_g, # only useful for some plots
"tru_sky_level":tru_sky_level, # in ADU, just for generating noise, will not remain in the image
"tru_gain":tru_gain, # in photons/ADU. Make this negative to have no Poisson noise
"tru_read_noise":tru_read_noise, # in photons if gain > 0.0, otherwise in ADU.Set this to zero to have no flat Gaussian noise
"tru_s1":tru_s1,
"tru_s2":tru_s2,
"tru_mu":tru_mu,
#"tru_psf_sigma":2.0,
#"tru_psf_g1":0.0,
#"tru_psf_g2":0.0,
"tru_cropper_snr":tru_cropper_snr,
}
| [
"malte.tewes@gmail.com"
] | malte.tewes@gmail.com |
2ff4be473a4847f049f4cc30b5101abf4ed13ff8 | cc64d03b132b773acae845c52f41fcdcdcaee273 | /test/functional/wallet_coinbase_category.py | 15e59fad3c5890d950270402edb2f1939d3bf62f | [
"MIT"
] | permissive | phlsolo316/vidcoin | aa9aae1e0f2215edadd2df89e1c9b6669abbce76 | d6eec232378c329ebc2a31e7d21acf58cf62368d | refs/heads/main | 2023-05-26T05:01:32.379060 | 2021-06-07T02:46:07 | 2021-06-07T02:46:07 | 373,622,056 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,299 | py | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test coinbase transactions return the correct categories.
Tests listtransactions, listsinceblock, and gettransaction.
"""
from test_framework.test_framework import VIDCoinTestFramework
from test_framework.util import (
assert_array_result
)
class CoinbaseCategoryTest(VIDCoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def assert_category(self, category, address, txid, skip):
assert_array_result(self.nodes[0].listtransactions(skip=skip),
{"address": address},
{"category": category})
assert_array_result(self.nodes[0].listsinceblock()["transactions"],
{"address": address},
{"category": category})
assert_array_result(self.nodes[0].gettransaction(txid)["details"],
{"address": address},
{"category": category})
def run_test(self):
# Generate one block to an address
address = self.nodes[0].getnewaddress()
self.nodes[0].generatetoaddress(1, address)
hash = self.nodes[0].getbestblockhash()
txid = self.nodes[0].getblock(hash)["tx"][0]
# Coinbase transaction is immature after 1 confirmation
self.assert_category("immature", address, txid, 0)
# Mine another 99 blocks on top
self.nodes[0].generate(99)
# Coinbase transaction is still immature after 100 confirmations
self.assert_category("immature", address, txid, 99)
# Mine one more block
self.nodes[0].generate(1)
# Coinbase transaction is now matured, so category is "generate"
self.assert_category("generate", address, txid, 100)
# Orphan block that paid to address
self.nodes[0].invalidateblock(hash)
# Coinbase transaction is now orphaned
self.assert_category("orphan", address, txid, 100)
if __name__ == '__main__':
CoinbaseCategoryTest().main()
| [
"36169687+blockinator@users.noreply.github.com"
] | 36169687+blockinator@users.noreply.github.com |
d114bd12241b8318364df59f4a6569dd709ee16c | 6131b2738a7c087dfa6907c624453576f6f0e393 | /flask_project/fish/app/spider/yushu_book.py | 027eea09c9285f9ff2cb4b25722aa5d35719a702 | [] | no_license | heheddff/myPythonProcess | 60ef240130cd02906dc500eedb397a9662c02e5a | 885a25dd2a9cd43801306d9e70b9ce89daec4406 | refs/heads/master | 2020-04-08T19:09:18.192738 | 2019-08-06T02:52:54 | 2019-08-06T02:52:54 | 159,642,468 | 4 | 5 | null | null | null | null | UTF-8 | Python | false | false | 872 | py | from app.libs.download import DOWNLOAD
from flask import current_app
class YuShuBook:
    """Thin client for the yushu book API: lookup by ISBN or keyword."""

    isbn_url = 'http://t.yushu.im/v2/book/isbn/{}'
    keyword_url = 'http://t.yushu.im/v2/book/search?q={}&count={}&start={}'
    per_page = 15

    @classmethod
    def search_by_isbn(cls, isbn):
        """Fetch a single book record by its ISBN."""
        return DOWNLOAD.get(cls.isbn_url.format(isbn))

    @classmethod
    def search_by_keyword(cls, keyword, page=1):
        """Fetch one page of keyword search results.

        Page size comes from the app's PER_PAGE configuration.
        """
        url = cls.keyword_url.format(
            keyword,
            current_app.config['PER_PAGE'],
            cls.calaculed_start(page),
        )
        return DOWNLOAD.get(url)

    @staticmethod
    def calaculed_start(page):
        """Convert a 1-based page number into a 0-based result offset."""
        return (page - 1) * current_app.config['PER_PAGE']
"qq2003qq@126.com"
] | qq2003qq@126.com |
f00aab7f8fb74e06325b57eac9054ee1eee7131a | 6ed9e72d2676447dcb68683d8823712110bb3b5e | /setup.py | b8e64f5229ce5f874831ed45e3a972b29e0365eb | [] | no_license | brandon-rhodes/python-johnhancock | 79a416e037817ee41fa384797a001ee6fcfa31b5 | 43b1128a9c217fad5ba6c6143cdbea97d6e44e51 | refs/heads/master | 2023-07-21T17:30:09.358965 | 2020-07-01T00:34:43 | 2020-07-01T00:34:43 | 6,408,682 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 308 | py | from distutils.core import setup
# Distribution metadata for the johnhancock PDF-signing tool.
setup(
    name='johnhancock',
    version='0.1',
    description='Sign a PDF using a PNG image',
    author='Brandon Rhodes',
    author_email='brandon@rhodesmill.org',
    packages=['johnhancock'],
    install_requires=['Pillow', 'pyPdf', 'reportlab'],
)
| [
"brandon@rhodesmill.org"
] | brandon@rhodesmill.org |
26bc6917e32b1470a9c5e17be693dbd5ee407aea | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2560/60580/313785.py | 220666b21e9e0726571f13ffcbd14a6e29126aca | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 497 | py | size = int(input())
a = 0
while a < size:
b = input()
list = input().split()
num = int(input())
i = 0
d = {}
while i < len(list):
if (list[i] in d):
d[list[i]] = d[list[i]] + 1
else:
d[list[i]] = 1
i = i + 1
dict = sorted(d.items(), key=lambda item: item[1])
h = 0
for key, value in d.items():
if (num >= value):
num = num - value
else:
h = h + 1
print(h)
a = a + 1 | [
"1069583789@qq.com"
] | 1069583789@qq.com |
cc3722d0a6ada9953627c61b49f3cfed650d7810 | cb1d6bd2bf5edb6e38a9094b6a003f8c8d6c01e8 | /carrent/myapp/migrations/0001_initial.py | d3efa695ec10d0db57d17a7ac52e39711a0794cf | [] | no_license | wasit7/cs459_2018s | b3d8444f2c697c1211ba949634e132af1692f7bd | dee7edcefd0964e342bf958bf91314e3c27b4be6 | refs/heads/master | 2020-04-19T15:20:43.543867 | 2019-02-20T05:01:39 | 2019-02-20T05:01:39 | 168,271,186 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,074 | py | # Generated by Django 2.1.5 on 2019-02-20 04:39
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the car-rental app: Car and Customer tables."""

    initial = True

    dependencies = []

    operations = [
        # Rental fleet entry.
        migrations.CreateModel(
            name='Car',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('brand', models.CharField(max_length=10)),
                ('price', models.DecimalField(decimal_places=2, max_digits=10)),
                ('purchasing_date', models.DateField()),
            ],
        ),
        # Person renting a car.
        migrations.CreateModel(
            name='Customer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('firstname', models.CharField(max_length=20)),
                ('lastname', models.CharField(max_length=20)),
                ('dob', models.DateField()),
                ('tel', models.CharField(max_length=10)),
            ],
        ),
    ]
| [
"wasit7@gmail.com"
] | wasit7@gmail.com |
d7a4a0f9aeec15da14a66873cdfff142ccbe3ea8 | 70e79590cd66ba1cd37ff977890164baa3d5c53c | /blog/migrations/0002_blogpage.py | cb87b9ef8a74366436525193a302f6efca545ccb | [] | no_license | thibaudcolas/codersofcolour | 7206c18f9c0e31f206f7d1dccaef2df1e46f5ecf | b085d5ec14c08d04ab2439f0a79730996cb87785 | refs/heads/master | 2022-12-02T15:06:20.984555 | 2020-06-10T18:06:02 | 2020-06-10T18:06:02 | 270,047,484 | 0 | 0 | null | 2020-07-26T14:16:37 | 2020-06-06T17:06:10 | Python | UTF-8 | Python | false | false | 963 | py | # Generated by Django 3.0.7 on 2020-06-06 16:42
from django.db import migrations, models
import django.db.models.deletion
import wagtail.core.fields
class Migration(migrations.Migration):
    """Add BlogPage, a Wagtail Page subclass with date, intro and body."""

    dependencies = [
        ('wagtailcore', '0045_assign_unlock_grouppagepermission'),
        ('blog', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='BlogPage',
            fields=[
                # Multi-table inheritance link to wagtailcore.Page.
                ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
                ('date', models.DateField(verbose_name='Post date')),
                ('intro', models.CharField(max_length=250)),
                ('body', wagtail.core.fields.RichTextField(blank=True)),
            ],
            options={
                'abstract': False,
            },
            bases=('wagtailcore.page',),
        ),
    ]
| [
"thibaudcolas@gmail.com"
] | thibaudcolas@gmail.com |
cb6eb73063e7f44c86ad29a66545b4d90f775ce6 | ae3d0e3c2fb614d96f6c787583c6e2e4cb654ad4 | /leetcode/118_generate.py | b1346f72c2348527aadf663326c7f5841837c9c0 | [] | no_license | Cjz-Y/shuati | 877c3f162ff75f764aa514076caccad1b6b43638 | 9ab35dbffed7865e41b437b026f2268d133357be | refs/heads/master | 2023-02-02T10:34:05.705945 | 2020-12-14T01:41:39 | 2020-12-14T01:41:39 | 276,884,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 590 | py | from typing import List
class Solution:
    def generate(self, numRows: int) -> List[List[int]]:
        """Return the first `numRows` rows of Pascal's triangle."""
        triangle: List[List[int]] = []
        for size in range(1, numRows + 1):
            # Every row starts and ends with 1; interior cells are the sum
            # of the two cells above them in the previous row.
            row = [1] * size
            if size > 2:
                prev = triangle[-1]
                for j in range(1, size - 1):
                    row[j] = prev[j - 1] + prev[j]
            triangle.append(row)
        return triangle
| [
"cjz.y@hotmail.com"
] | cjz.y@hotmail.com |
6bb690802c91855dd0f6fa1add90a9b09c97c432 | 1e53216c58f3c7843031721305590b83dbaed3f2 | /week_four/form_practice/form_app/views.py | c648c4ca3b1982be30485cd36518e4f4e2084c59 | [] | no_license | MTaylorfullStack/python_july_20 | 991852ba12d6f06d6b93b8efc60b66ee311b5cb3 | bdfb0d9a74300f2d6743ac2d108571692ca43ad9 | refs/heads/master | 2022-12-12T18:03:00.886048 | 2020-08-27T23:53:31 | 2020-08-27T23:53:31 | 277,956,745 | 2 | 2 | null | 2023-06-30T20:06:11 | 2020-07-08T01:09:34 | Python | UTF-8 | Python | false | false | 1,469 | py | from django.shortcuts import render, redirect
import random
def index(request):
    """Render the landing page with the input form."""
    return render(request, 'form.html')
def success(request):
    """Show the results page, ensuring the session has a gold counter."""
    request.session.setdefault('gold', 0)
    return render(request, "result.html")
def process(request):
    """Copy the submitted form fields into the session, then redirect."""
    form = request.POST
    print(form['user_name'])
    request.session['name'] = form['user_name']
    request.session['loc'] = form['location']
    request.session['lang'] = form['fav_lang']
    request.session['comment'] = form['comment']
    return redirect('/success')
def process_gold(request):
    """Handle one of the four gold-farming forms and update session gold.

    Which form was submitted is identified by the button name present in
    request.POST.  The payout ranges reproduce the original
    `int(random.random() * k + b)` expressions exactly:
    farm 10-19, cave 5-9, house 2-4, casino stakes 0-49 with a 40% win.
    """
    print(request.POST)
    if "farm" in request.POST:
        request.session['gold'] += random.randint(10, 19)
    if "cave" in request.POST:
        request.session['gold'] += random.randint(5, 9)
    if "house" in request.POST:
        request.session['gold'] += random.randint(2, 4)
    if "casino" in request.POST:
        stake = random.randint(0, 49)
        # 4 winning digits out of 10 -> 40% chance to gain, else lose.
        if random.randint(0, 9) > 5:
            request.session['gold'] += stake
        else:
            request.session['gold'] -= stake
    return redirect('/success')
def reset(request):
    """Wipe the entire session and return to the form page."""
    request.session.flush()
    return redirect('/')
| [
"mtaylor@codingdojo.com"
] | mtaylor@codingdojo.com |
b1c59fe04d1ba3b07bb1c5709673c2d7f7d2fd61 | 8eab8ab725c2132bb8d090cdb2d23a5f71945249 | /virt/Lib/site-packages/pygments/lexers/gsql.py | 6af99b27ce040871edb2972c020afb40d527b0bb | [
"MIT"
] | permissive | JoaoSevergnini/metalpy | 6c88a413a82bc25edd9308b8490a76fae8dd76ca | c2d0098a309b6ce8c756ff840bfb53fb291747b6 | refs/heads/main | 2023-04-18T17:25:26.474485 | 2022-09-18T20:44:45 | 2022-09-18T20:44:45 | 474,773,752 | 3 | 1 | MIT | 2022-11-03T20:07:50 | 2022-03-27T22:21:01 | Python | UTF-8 | Python | false | false | 3,772 | py | """
pygments.lexers.gsql
~~~~~~~~~~~~~~~~~~~~
Lexers for TigerGraph GSQL graph query language
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, using, this, words
from pygments.token import Keyword, Punctuation, Comment, Operator, Name,\
String, Number, Whitespace, Token
__all__ = ["GSQLLexer"]
class GSQLLexer(RegexLexer):

    """
    For GSQL queries (version 3.x).

    .. versionadded:: 2.10
    """

    name = 'GSQL'
    url = 'https://docs.tigergraph.com/dev/gsql-ref'
    aliases = ['gsql']
    filenames = ['*.gsql']

    # GSQL keywords are case-insensitive; queries span multiple lines.
    flags = re.MULTILINE | re.IGNORECASE

    tokens = {
        # Order matters: earlier includes win on overlapping matches.
        'root': [
            include('comment'),
            include('keywords'),
            include('clauses'),
            include('accums'),
            include('relations'),
            include('strings'),
            include('whitespace'),
            include('barewords'),
            include('operators'),
        ],
        # '#' line comments and C-style block comments.
        'comment': [
            (r'\#.*', Comment.Single),
            (r'/\*(.|\n)*?\*/', Comment.Multiline),
        ],
        # Reserved words; (?<!\.) keeps attribute accesses like v.count out.
        'keywords': [
            (words((
                'ACCUM', 'AND', 'ANY', 'API', 'AS', 'ASC', 'AVG', 'BAG', 'BATCH', 'BETWEEN', 'BOOL', 'BOTH',
                'BREAK', 'BY', 'CASE', 'CATCH', 'COALESCE', 'COMPRESS', 'CONTINUE', 'COUNT',
                'CREATE', 'DATETIME', 'DATETIME_ADD', 'DATETIME_SUB', 'DELETE', 'DESC', 'DISTRIBUTED', 'DO',
                'DOUBLE', 'EDGE', 'ELSE', 'END', 'ESCAPE', 'EXCEPTION', 'FALSE', 'FILE', 'FILTER', 'FLOAT', 'FOREACH', 'FOR',
                'FROM', 'GRAPH', 'GROUP', 'GSQL_INT_MAX', 'GSQL_INT_MIN', 'GSQL_UINT_MAX', 'HAVING', 'IF',
                'IN', 'INSERT', 'INT', 'INTERPRET', 'INTERSECT', 'INTERVAL', 'INTO', 'IS', 'ISEMPTY', 'JSONARRAY', 'JSONOBJECT', 'LASTHOP',
                'LEADING', 'LIKE', 'LIMIT', 'LIST', 'LOAD_ACCUM', 'LOG', 'MAP', 'MATCH', 'MAX', 'MIN', 'MINUS', 'NOT',
                'NOW', 'NULL', 'OFFSET', 'OR', 'ORDER', 'PATH', 'PER', 'PINNED', 'POST_ACCUM', 'POST-ACCUM', 'PRIMARY_ID', 'PRINT',
                'QUERY', 'RAISE', 'RANGE', 'REPLACE', 'RESET_COLLECTION_ACCUM', 'RETURN', 'RETURNS', 'RUN', 'SAMPLE', 'SELECT', 'SELECT_VERTEX',
                'SET', 'SRC', 'STATIC', 'STRING', 'SUM', 'SYNTAX', 'TARGET', 'TAGSTGT', 'THEN', 'TO', 'TO_CSV', 'TO_DATETIME', 'TRAILING', 'TRIM', 'TRUE',
                'TRY', 'TUPLE', 'TYPEDEF', 'UINT', 'UNION', 'UPDATE', 'VALUES', 'VERTEX', 'WHEN', 'WHERE', 'WHILE', 'WITH'), prefix=r'(?<!\.)', suffix=r'\b'), Token.Keyword)
        ],
        # Query-body clause names.
        'clauses': [
            (words(('accum', 'having', 'limit', 'order', 'postAccum', 'sample', 'where')), Name.Builtin)
        ],
        # Built-in accumulator types.
        'accums': [
            (words(('andaccum', 'arrayaccum', 'avgaccum', 'bagaccum', 'bitwiseandaccum',
                    'bitwiseoraccum', 'groupbyaccum', 'heapaccum', 'listaccum', 'MapAccum',
                    'maxaccum', 'minaccum', 'oraccum', 'setaccum', 'sumaccum')), Name.Builtin),
        ],
        # Graph pattern syntax: -(edge:alias)- plus directed arrows.
        'relations': [
            (r'(-\s?)(\(.*\:\w?\))(\s?-)', bygroups(Operator, using(this), Operator)),
            (r'->|<-', Operator),
            (r'[.*{}\[\]\<\>\_]', Punctuation),
        ],
        # Double-quoted strings and @local / @@global accumulator variables.
        'strings': [
            (r'"([^"\\]|\\.)*"', String),
            (r'@{1,2}\w+', Name.Variable),
        ],
        'whitespace': [
            (r'\s+', Whitespace),
        ],
        # Identifiers and numeric literals.
        'barewords': [
            (r'[a-z]\w*', Name),
            (r'(\d+\.\d+|\d+)', Number),
        ],
        'operators': [
            (r'\$|[^0-9|\/|\-](\-\=|\+\=|\*\=|\\\=|\=|\=\=|\=\=\=|\+|\-|\*|\\|\+\=|\>|\<)[^\>|\/]', Operator),
            (r'(\||\(|\)|\,|\;|\=|\-|\+|\*|\/|\>|\<|\:)', Operator),
        ],
    }
| [
"joao.a.severgnini@gmail.com"
] | joao.a.severgnini@gmail.com |
d87f498e4a7d959b5fc54f1785b26a4afec1578f | a8c95f5152c08b487c3f85246150f9f7cdd557e0 | /torabot/frontend/admin/auth.py | 1e1558234cd037fcc769c44fbecd2bb02d103592 | [
"MIT"
] | permissive | sorunis/torabot | b58113adab85e78551095e8f4551b0bbaf48e8f1 | 6d1a0a524f184cc33c5dfb3d7fc5e95af791a018 | refs/heads/master | 2020-12-11T07:19:54.269478 | 2014-05-04T18:15:54 | 2014-05-04T18:15:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 343 | py | from functools import wraps
from ...core.local import is_admin
from ..auth import require_session
from .errors import AdminAuthError
def require_admin(f):
    """Decorator: require an authenticated session AND admin privileges.

    Raises AdminAuthError when the current user is not an admin.
    """
    @require_session
    @wraps(f)
    def wrapper(user_id, *args, **kargs):
        if not is_admin:
            raise AdminAuthError()
        return f(*args, **kargs)
    return wrapper
| [
"answeror@gmail.com"
] | answeror@gmail.com |
fa66b83ea082481837e05a8ceeb01bb4f447c3b4 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02700/s830417832.py | 15b00c59d92d438ff9c194f96ea1df0e3dd49e2a | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | py | a, b, c, d = map(int,input().split())
for i in range(100):
if c > b:
c -= b
elif c <= b:
print("Yes")
exit()
if a > d:
a -= d
elif a <= d:
print("No")
exit() | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
4a86fdc37d06384976bba51986fb40e2f225bded | a62390c9497363a9afbaac79336b3e1cb04e096e | /Day 23/turtle_crossing_game.py | dee8b9aae7eb48119d8fb87fe7a405c05db3634d | [] | no_license | PravinSelva5/100-Days-of-Code | 2cf3ae9766bdfd053532823214276e291024d5a2 | 3a299af3de5f4f0cab05fc73563df29e3c292560 | refs/heads/master | 2023-07-13T06:29:15.360016 | 2021-08-22T16:50:49 | 2021-08-22T16:50:49 | 380,812,653 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 828 | py | import time
from turtle import Screen
from player import Player
from car_manager import CarManager
from scoreboard import Scoreboard
# Set up a 600x600 window; tracer(0) disables auto-redraw so the loop
# can repaint once per tick via screen.update().
screen = Screen()
screen.setup(width=600, height=600)
screen.tracer(0)

player = Player()
car_manager = CarManager()
scoreboard = Scoreboard()

# Only control: the Up arrow moves the player forward.
screen.listen()
screen.onkey(player.go_up, "Up")

game_is_on = True
while game_is_on:
    time.sleep(0.1)  # ~10 frames per second
    screen.update()
    car_manager.create_car()
    car_manager.move_cars()

    # Detect collision with car
    for car in car_manager.all_cars:
        if car.distance(player) < 20:
            game_is_on = False
            scoreboard.game_over()

    # Detect a successful crossing
    if player.is_at_finish_line():
        player.go_to_start()
        car_manager.next_level()
        scoreboard.increase_level()

screen.exitonclick()
| [
"pravin.selvarajah.eng@gmail.com"
] | pravin.selvarajah.eng@gmail.com |
0d89b4421f4197551bd71933e7e38fc0a07c5a69 | 012837eafe45c8f7ee5fc77d4c4d7725d5314c5c | /workshops/9-section/5-clazz.py | 387fdf5912dcfa0b0f19553d2a5d7c3b235a1fe0 | [
"MIT"
] | permissive | ai-erorr404/opencv-practice | e9408cf006779a678cf3a30fc60e9dbeb3c8e493 | 60ef5e4aec61ee5f7e675fb919e8f612e59f664a | refs/heads/master | 2021-02-08T11:17:04.763522 | 2020-02-22T09:43:04 | 2020-02-22T09:43:04 | 244,146,060 | 1 | 1 | MIT | 2020-03-01T12:35:02 | 2020-03-01T12:35:01 | null | UTF-8 | Python | false | false | 2,927 | py | #!/usr/bin/env python3
# -*- coding=utf-8 -*-
import cv2 as cv
from goto import with_goto
import math
import numpy as np
"""
KLT光流跟踪法二:
静止点删除与跟踪轨迹绘制。处置流程为 输入第一帧图像 -> 特征点检测 -> 保持特征点 -> 输入第二帧图像(开始跟踪) -> 跟踪特征点 -> 删除
损失特征点 -> 保存跟踪特征点 -> 用第二帧图像替换第一帧图像 -> 用后续输入帧替换第二帧 -> 选择新的特征点替换损失的特征点 -> 保存特征点数据
并回到输入第二帧图像,开始循环。
"""
MAX_CORNERS = 100
features_params = dict(maxCorners=MAX_CORNERS, qualityLevel=0.01, minDistance=10, blockSize=3, mask=None)
lk_params = dict(nextPts=None, winSize=(31, 31), maxLevel=3,
criteria=(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 30, 0.01))
color_set = np.random.randint(0, 255, (MAX_CORNERS, 3))
# points = []
@with_goto
def main():
    """Track corners frame-to-frame with LK optical flow and draw the
    motion of points that moved more than 2 px between frames."""
    capture = cv.VideoCapture("../../../raspberry-auto/pic/vtest.avi")
    ret, frame = capture.read()
    if True is not ret:
        print("can't read any video")
        goto .end
    prv_frame = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    prv_frame = cv.medianBlur(prv_frame, 3)  # denoise before corner detection
    prv_corners = cv.goodFeaturesToTrack(prv_frame, **features_params)
    # points += prv_corners
    while True:
        ret, frame = capture.read()
        if True is not ret:
            print("can't read next frame.")
            break
        next_frame = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
        next_frame = cv.medianBlur(next_frame, 3)
        next_corners, status, err = cv.calcOpticalFlowPyrLK(prv_frame, next_frame, prv_corners, **lk_params)
        # status == 1 marks corners that were tracked successfully.
        old_pts = prv_corners[1 == status]
        new_pts = next_corners[1 == status]
        for i, (older, newer) in enumerate(zip(old_pts, new_pts)):
            a, b = older.ravel()
            c, d = newer.ravel()
            # Euclidean displacement of the corner between the two frames.
            width = math.pow(abs(a - c), 2)
            height = math.pow(abs(b - d), 2)
            hypotenuse = math.sqrt(width + height)
            if 2 < hypotenuse:  # draw only points that actually moved
                cv.circle(frame, (c, d), 5, color_set[i].tolist(), -1)
                cv.line(frame, (c, d), (a, b), color_set[i].tolist(), 2, cv.LINE_8)
            # else:
            #     new_pts.remove(older)
        # if 40 > len(new_pts):
        #     next_corners, status, err = cv.calcOpticalFlowPyrLK(prv_frame, next_frame, prv_corners, **lk_params)
        #     new_pts = next_corners[1 == status]
        cv.imshow("frame", frame)
        key = cv.waitKey(30) & 0xff
        if 27 == key:  # ESC quits
            break
        # Update the previous frame and corners for the next iteration.
        prv_frame = next_frame.copy()
        prv_corners = new_pts.reshape(-1, 1, 2)
    label .end
    capture.release()
    cv.destroyAllWindows()


if "__main__" == __name__:
    main()
| [
"afterloe@foxmail.com"
] | afterloe@foxmail.com |
a149db09fb1a3483f75201ee41bba85f77be8210 | 8999b8522b18a52d09e1c76d28ee77f0d022e8fd | /pyrarcrack/pyrarcrack.py | 1e8a4da42a11f4804de1c921b09bf62577372f1e | [
"Apache-2.0"
] | permissive | abhushansahu/py-rarcrack | 7bbba825e7e9794b538818e3f1958758f58ca767 | 4326da72d9a351f1e64c8657cacada08bdbada0b | refs/heads/master | 2021-01-14T16:04:03.282548 | 2019-10-27T18:09:40 | 2019-10-27T18:09:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,151 | py | #!/usr/bin/python
#
##################################################
######## Please Don't Remove Author Name #########
############### Thanks ###########################
##################################################
#
#
# NOTE(review): this script is Python 2 (print statements throughout).
__author__='''
Suraj Singh
surajsinghbisht054@gmail.com
http://bitforestinfo.blogspot.in/
'''

# =================Other Configuration================
# Usages :
usage = "usage: %prog [options] "
# Version
Version="%prog 0.0.1"
# ====================================================

# Print the author banner at start-up.
print __author__
# Import Modules
import rarfile,optparse,sys,fileinput,time
class main:
    """Command-line RAR password brute forcer (Python 2).

    Candidate passwords come from a wordlist file (-d) or from stdin
    (-c, e.g. piped from crunch); each is tried with rarfile.extractall.
    """
    def __init__(self):
        # Parse options, validate them, then start the brute force.
        self.extract_input_data()
        self.check_input_conditions()
        self.start_cracking_engine()
    def time_management(self):
        # Print run statistics; average speed is tries per second.
        print "[*] Starting Time ",self.starttime
        print "[*] Closing Time ",self.closetime
        print "[*] Password Try ",self.pwdtries
        print "[*] Average Speed ",self.pwdtries/(self.closetime-self.starttime)
        return
    def start_cracking_engine(self):
        # Open the archive once and feed it every candidate password.
        print "[+] Loading rarfile... ",
        fileload=rarfile.RarFile(self.filename)
        print "OK"
        if self.dictionery:
            print "[+] Using Dictonery Option.... OK"
            print "[+] Loading Dictonery File... OK"
            print "[+] Brute Force Started ..."
            for i in fileinput.input(self.dictionery):
                pwd=i.strip('\n')
                self.extracting_engine(fileload,pwd)
        if self.crunch:
            # Candidates arrive on stdin, one per line.
            print "[+] Connection Stablished as Pipe... OK"
            print "[+] Brute Force Started ..."
            for i in sys.stdin:
                pwd=i.strip('\n')
                self.extracting_engine(fileload,pwd)
        # Reaching this point means no candidate succeeded.
        self.show_info_message()
        return
    def check_input_conditions(self):
        # Validate the argument combination before doing any work.
        if not self.filename:
            print "[ Error ] Please Provide Rar File Path "
            sys.exit(0)
        print "[+] Checking Rar File Condition ...",
        if not rarfile.is_rarfile(self.filename):
            print "[ Error ] Bad Rar file"
            sys.exit(0)
        print " Ok"
        if not self.dictionery and not self.crunch:
            print "[ Error ] Please Provide Dictonery Or Crunch Or Password Option"
            sys.exit(0)
        if self.dictionery and self.crunch:
            print "[ Error ] Please Choose Any One Option From Dict or Crunch"
            sys.exit(0)
        return
    def extracting_engine(self,file,pwd):
        # Try one password; a successful (or permission-denied) extract
        # is treated as a hit and reported via show_info_message.
        self.pwdresult=None
        try:
            file.extractall(self.output,pwd=str(pwd))
            self.show_info_message(pwd=pwd)
            self.pwdresult=True
        except Exception as e:
            # 'Permission' errors mean the password was accepted but the
            # target could not be written -- still counted as found.
            if str(e).find('Permission')!=-1:
                self.show_info_message(pwd=pwd)
                self.pwdresult=True
            else:
                self.pwdresult=None
        self.pwdtries=self.pwdtries+1
        return
    def show_info_message(self,pwd=None):
        # Report success (pwd given) or failure, optionally append to the
        # result file, print timing stats, and exit on success.
        if pwd:
            data="\n\t !-Congratulation-! \n\t\tPassword Found = "+pwd+'\n'
        else:
            data="\n\t Sorry! Password Not Found \n\n"
        print data
        if self.result:
            print "[+] Saving Output in ",self.result
            f=open(self.result,'a')
            f.write(data)
            f.close()
        self.closetime=time.time()
        self.time_management()
        if pwd:
            print "[+] Exiting..."
            sys.exit(0)
        return
    def extract_input_data(self):
        # Parse command-line options into instance attributes.
        self.starttime=time.time()
        self.pwdtries=0
        # Extracting Function
        parser = optparse.OptionParser(usage, version=Version)
        parser.add_option("-f", "--file", action="store", type="string", dest="filename",help="Please Specify Path of Rar File", default=None)
        parser.add_option("-d", "--dict", action="store", type="string", dest="dictionery", help="Please Specify Path of Dictionery.", default=None)
        parser.add_option("-o", "--output", action="store", type="string", dest="output", help="Please Specify Path for Extracting", default='.')
        parser.add_option("-r", "--result", action="store", type="string", dest="result", help="Please Specify Path if You Want to Save Result", default=None)
        parser.add_option("-c", "--crunch", action="store", type="string", dest="crunch", help="For Using Passwords Directly from crunch use this arguments: -c True or --crunch True", default=None)
        (option, args)=parser.parse_args()
        # Record Inputs Data
        print "[+] Extracting Input Data..."
        self.filename=option.filename
        self.dictionery=option.dictionery
        self.output=option.output
        self.result=option.result
        self.crunch=option.crunch
        return

if __name__ == '__main__':
    main()
"surajsinghbisht054@gmail.com"
] | surajsinghbisht054@gmail.com |
250bd77824bcedf7034eb92f335b65c75371bac9 | c6abddbc632b2362db0817aeab89387ea6a92902 | /qiskit/extensions/standard/ry.py | 5ac582759d10f05f7b9c33e624e57ba90dae61a4 | [
"Apache-2.0"
] | permissive | azulehner/qiskit-sdk-py | 10c3c8d5e198e06e668d356bb78a98b279c8b3b8 | 138484e41eb8bd504f3b6977e267efdd0d9f208b | refs/heads/master | 2021-05-12T19:59:23.051113 | 2018-01-11T09:12:18 | 2018-01-11T09:12:18 | 117,109,018 | 2 | 0 | null | 2018-01-11T14:11:08 | 2018-01-11T14:11:07 | null | UTF-8 | Python | false | false | 2,131 | py | # -*- coding: utf-8 -*-
# pylint: disable=invalid-name
# Copyright 2017 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
Rotation around the y-axis.
"""
from qiskit import CompositeGate
from qiskit import Gate
from qiskit import InstructionSet
from qiskit import QuantumCircuit
from qiskit import QuantumRegister
from qiskit.extensions.standard import header # pylint: disable=unused-import
class RYGate(Gate):
    """rotation around the y-axis."""

    def __init__(self, theta, qubit, circ=None):
        """Create new ry single qubit gate."""
        super(RYGate, self).__init__("ry", [theta], [qubit], circ)

    def qasm(self):
        """Return OPENQASM string."""
        # qubit is a (register, index) pair.
        qubit = self.arg[0]
        theta = self.param[0]
        return self._qasmif("ry(%s) %s[%d];" % (theta, qubit[0].name,
                                                qubit[1]))

    def inverse(self):
        """Invert this gate.

        ry(theta)^dagger = ry(-theta)
        """
        # NOTE(review): negates the angle in place and returns the same
        # gate object rather than a fresh one -- callers holding a
        # reference see the mutation.
        self.param[0] = -self.param[0]
        return self

    def reapply(self, circ):
        """Reapply this gate to corresponding qubits in circ."""
        self._modifiers(circ.ry(self.param[0], self.arg[0]))
self._modifiers(circ.ry(self.param[0], self.arg[0]))
def ry(self, theta, q):
    """Apply ry(theta) to qubit q, or to every qubit of a register."""
    if isinstance(q, QuantumRegister):
        # Fan out over the whole register, collecting the instructions.
        instructions = InstructionSet()
        for index in range(q.sz):
            instructions.add(self.ry(theta, (q, index)))
        return instructions
    self._check_qubit(q)
    return self._attach(RYGate(theta, q, self))
return self._attach(RYGate(theta, q, self))
# Expose the helper on both circuits and composite gates.
QuantumCircuit.ry = ry
CompositeGate.ry = ry
| [
"diego.plan9@gmail.com"
] | diego.plan9@gmail.com |
0185585a72a6d43205d0b55e5098876e40118a49 | 1726f4c11106d09313324d12d274705540baa9f4 | /server/apps/rooms/urls.py | 56e2c3207ecacd33289f58af7f389a47a8768e7b | [] | no_license | AlAstroMoody/chat | 3c1f33a343c72836867587200abbe2adedf0bbc4 | 7a528d62ccf5e4ed1d478a6479e41d37d08b87f8 | refs/heads/main | 2023-03-25T04:29:55.547258 | 2021-03-15T02:30:19 | 2021-03-15T02:30:19 | 347,811,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | from django.urls import path
from .views import RoomView
from .views import ReportView
# URL patterns for the rooms app: the room endpoint, and a per-room
# report endpoint addressed by the room uuid plus an access token.
rooms_urlpatterns = [
    path('room/', RoomView.as_view(), name='room'),
    path('room/<uuid:uuid>-<str:token>/report/', ReportView.as_view(), name='report'),
]
| [
"aastrotenko@mail.ru"
] | aastrotenko@mail.ru |
232ae1fa95eccd3f919594407f942bf53ac1636b | 6c3dbc51b19ddd21c389de79c29fa3706fc44733 | /models/cnn_bilstm_attention.py | 823f648be7ed06d81bfbd1e5e92a28a9c426eb31 | [
"MIT"
] | permissive | Eurus-Holmes/Tumor2Graph | 7db12920a21b1b8609087fd9d7ceb245420cb536 | 6e52748d8cd2e8fe33092e2c67e92e6454a964b3 | refs/heads/main | 2023-07-07T22:15:17.869378 | 2021-09-02T03:31:46 | 2021-09-02T03:31:46 | 390,660,583 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,951 | py | from torch import nn
import torch
import torch.nn.functional as F
class TextCNN(nn.Module):
    """Multi-scale 1-D convolution block for sequence features.

    Applies one Conv1d per context size and returns both the
    batch-norm+ReLU feature sequence and a max-over-time pooled vector.
    """
    def __init__(self, feaSize, contextSizeList, filterNum, name='textCNN'):
        super(TextCNN, self).__init__()
        self.name = name
        convs = [
            nn.Sequential(
                nn.Conv1d(in_channels=feaSize, out_channels=filterNum,
                          kernel_size=context, padding=context // 2)
            )
            for context in contextSizeList
        ]
        self.conv1dList = nn.ModuleList(convs)
        self.process = nn.Sequential(nn.BatchNorm1d(filterNum), nn.ReLU())

    def forward(self, x):
        # x: batchSize x seqLen x feaSize -> channels-first for Conv1d
        channels_first = x.transpose(1, 2)
        feature_maps = [conv(channels_first) for conv in self.conv1dList]
        normalized = [self.process(fm) for fm in feature_maps]
        # Max over the time axis of the raw conv output, then ReLU.
        pooled = [torch.relu(torch.max(fm, dim=2)[0]) for fm in feature_maps]
        # (batchSize x seqLen x scaleNum*filterNum, batchSize x scaleNum*filterNum)
        return torch.cat(normalized, dim=1).transpose(1, 2), torch.cat(pooled, dim=1)
class Simple_Protein_Predict(nn.Module):
    """Twin-tower CNN + BiLSTM + attention model scoring a sequence pair.

    Each input is an integer-encoded sequence (vocabulary of 22 tokens)
    run through an embedding, a TextCNN, and a 2-layer bidirectional
    LSTM with additive attention; the pooled CNN vectors and attention
    outputs of both towers are concatenated and mapped to a sigmoid
    probability.  forward() returns (BCE loss, predictions) when a label
    is given, otherwise just the predictions.
    """
    def __init__(self,lstm_hidden_size,lstm_hidden_size_1,contextSizeList,filter_number):
        super(Simple_Protein_Predict, self).__init__()
        self.lstm_hidden_size = lstm_hidden_size
        self.lstm_hidden_size_1 =lstm_hidden_size_1
        # Tower 1 (seq_1): 64-d embedding feeding textCNN then lstm.
        self.embed_matrix=nn.Embedding(22,64)
        self.lstm = nn.LSTM(len(contextSizeList)*filter_number,self.lstm_hidden_size,num_layers=2,batch_first=True,bidirectional=True)
        # Tower 2 (seq_2): 128-d embedding feeding textCNN_second then lstm1.
        self.embed_matrix1 = nn.Embedding(22, 128)
        self.lstm1 = nn.LSTM(len(contextSizeList)*filter_number, self.lstm_hidden_size_1, num_layers=2,batch_first=True,bidirectional=True)
        # NOTE(review): dropout and predict appear unused in forward();
        # kept to preserve checkpoint/state_dict compatibility.
        self.dropout=nn.Dropout(.2)
        self.predict = nn.Linear(4 * self.lstm_hidden_size_1, 4 * self.lstm_hidden_size)
        self.linear=nn.Linear(self.lstm_hidden_size+self.lstm_hidden_size_1+2*filter_number*len(contextSizeList),2 * self.lstm_hidden_size)
        self.final_linear=nn.Linear(2 * self.lstm_hidden_size,1)
        self.activation = nn.Sigmoid()
        # Attention projections, one per tower.
        self.attention_layer1 = nn.Sequential(
            nn.Linear(self.lstm_hidden_size_1, self.lstm_hidden_size_1),
            nn.ReLU(inplace=True))
        self.attention_layer = nn.Sequential(
            nn.Linear(self.lstm_hidden_size, self.lstm_hidden_size),
            nn.ReLU(inplace=True))
        self.textCNN = TextCNN(64 , contextSizeList, filter_number)
        self.textCNN_second = TextCNN(128 , contextSizeList, filter_number)
    def exponent_neg_manhattan_distance(self, x1, x2):
        ''' Helper function for the similarity estimate of the LSTMs outputs
        (currently only referenced from commented-out code in forward). '''
        return torch.exp(-torch.sum(torch.abs(x1 - x2), dim=1))
    def attention_net_with_w(self, lstm_out, lstm_hidden):
        '''
        Additive attention over tower-1 LSTM output.
        :param lstm_out: [batch_size, len_seq, n_hidden * 2]
        :param lstm_hidden: [batch_size, num_layers * num_directions, n_hidden]
        :return: [batch_size, n_hidden]
        '''
        # Sum the two directions of the BiLSTM output.
        lstm_tmp_out = torch.chunk(lstm_out, 2, -1)
        # h [batch_size, time_step, hidden_dims]
        h = lstm_tmp_out[0] + lstm_tmp_out[1]
        # [batch_size, num_layers * num_directions, n_hidden]
        lstm_hidden = torch.sum(lstm_hidden, dim=1)
        # [batch_size, 1, n_hidden]
        lstm_hidden = lstm_hidden.unsqueeze(1)
        # atten_w [batch_size, 1, hidden_dims]
        atten_w = self.attention_layer(lstm_hidden)
        # [batch_size, time_step, hidden_dims]
        m = nn.Tanh()(h)
        # atten_context [batch_size, 1, time_step]
        atten_context = torch.bmm(atten_w, m.transpose(1, 2))
        # softmax_w [batch_size, 1, time_step]
        softmax_w = F.softmax(atten_context, dim=-1)
        # context [batch_size, 1, hidden_dims]
        context = torch.bmm(softmax_w, h)
        result = context.squeeze(1)
        return result
    def attention_net_with_w_virus(self, lstm_out, lstm_hidden):
        '''
        Same additive attention for tower 2 (uses attention_layer1).
        :param lstm_out: [batch_size, len_seq, n_hidden * 2]
        :param lstm_hidden: [batch_size, num_layers * num_directions, n_hidden]
        :return: [batch_size, n_hidden]
        '''
        lstm_tmp_out = torch.chunk(lstm_out, 2, -1)
        # h [batch_size, time_step, hidden_dims]
        h = lstm_tmp_out[0] + lstm_tmp_out[1]
        # [batch_size, num_layers * num_directions, n_hidden]
        lstm_hidden = torch.sum(lstm_hidden, dim=1)
        # [batch_size, 1, n_hidden]
        lstm_hidden = lstm_hidden.unsqueeze(1)
        # atten_w [batch_size, 1, hidden_dims]
        atten_w = self.attention_layer1(lstm_hidden)
        # [batch_size, time_step, hidden_dims]
        m = nn.Tanh()(h)
        # atten_context [batch_size, 1, time_step]
        atten_context = torch.bmm(atten_w, m.transpose(1, 2))
        # softmax_w [batch_size, 1, time_step]
        softmax_w = F.softmax(atten_context, dim=-1)
        # context [batch_size, 1, hidden_dims]
        context = torch.bmm(softmax_w, h)
        result = context.squeeze(1)
        return result
    def forward(self, seq_1, seq_2, label):
        # Tower 1: embed -> CNN (sequence + pooled) -> BiLSTM -> attention.
        output1 = self.embed_matrix(seq_1)
        conv1, pooling_abs = self.textCNN(output1)
        orgin_output, output1 = self.lstm(conv1)
        # final_hidden_state : [batch_size, num_layers * num_directions, n_hidden]
        final_hidden_state = output1[0].permute(1, 0, 2)
        atten_out = self.attention_net_with_w(orgin_output, final_hidden_state)
        # Tower 2: same pipeline with the second embedding/CNN/LSTM.
        output2 = self.embed_matrix1(seq_2)
        conv2, pooling_virus = self.textCNN_second(output2)
        orgin_output1, output2= self.lstm1(conv2)
        # final_hidden_state : [batch_size, num_layers * num_directions, n_hidden]
        final_hidden_state1 = output2[0].permute(1, 0, 2)
        atten_out1 = self.attention_net_with_w_virus(orgin_output1, final_hidden_state1)
        # Fuse both towers' pooled and attended vectors.
        predictions = self.linear(torch.cat((pooling_abs,atten_out,pooling_virus,atten_out1), 1))
        #predictions=self.exponent_neg_manhattan_distance(output1,output2).squeeze()
        predictions=self.final_linear(predictions)
        predictions=self.activation(predictions)
        predictions=predictions.squeeze()
        #predictions = self.activation(output).squeeze()
        # Map [batch_size, hidden_dim] down to [batch_size, 1]: this is a
        # binary classification head, with sigmoid giving a 0-1 score.
        compute_loss = nn.BCELoss()
        if label is not None:
            # Compute the BCE loss against the provided labels.
            loss = compute_loss(predictions, label)
            return loss, predictions
        else:
            return predictions
| [
"noreply@github.com"
] | Eurus-Holmes.noreply@github.com |
18f94647b65564f8455ffaaba1d6773b058d9354 | 6a2c101774903441bc43bcafaef788a7465d38bb | /music_controller/spotify/migrations/0001_initial.py | 6d380eb7954ae335440d0885ddd1edc8b8424db7 | [] | no_license | naistangz/house-party | 9faa66e12a528881cd7e613fede9da2a1ccf8c19 | 2bad502dececbdf7d273c14b9ea96dd9dc9a0c45 | refs/heads/main | 2023-02-17T04:35:31.012691 | 2021-01-17T12:07:36 | 2021-01-17T12:07:36 | 330,140,403 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 835 | py | # Generated by Django 3.1.4 on 2021-01-03 14:17
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema migration: creates the SpotifyToken table that
    persists per-user Spotify OAuth credentials."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='SpotifyToken',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Session/user key the token belongs to; one token row per user.
                ('user', models.CharField(max_length=50, unique=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('refresh_token', models.CharField(max_length=150)),
                ('access_token', models.CharField(max_length=150)),
                # Expiry timestamp of the access token (stored as a datetime,
                # not the raw "seconds" value returned by the Spotify API).
                ('expires_in', models.DateTimeField()),
                ('token_type', models.CharField(max_length=50)),
            ],
        ),
    ]
| [
"a6anaistang@hotmail.co.uk"
] | a6anaistang@hotmail.co.uk |
2f58447eca540956aaaab1ccc21b07ff3717c1a8 | 4d2443d54c8a1104cad8ecc60e417e8a5af69450 | /entertainment_center.py | e0bef0107dacc1b001e5bdd21b53091f6f0b3a90 | [] | no_license | xueweiyema/movie_website | 4fa73dfbaf0a395e72eb6edcf91995dd5a2136e5 | 9770f994a291b51d4fd1a7032fc21a5ac3c537aa | refs/heads/master | 2021-01-19T07:09:44.646186 | 2017-04-24T06:41:19 | 2017-04-24T06:41:19 | 87,527,117 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,334 | py | import media
import fresh_tomatoes
titles = [
"Pirate Radio", "The Social Network", "Hidden Figures", "The Martian",
"Good Will Hunting", "The Lives of Others"
]
storylines = [
'''A band of rogue DJs that captivated Britain, playing the music that defined a generation and
standing up to a government that wanted classical music, and nothing else, on the airwaves.''',
'''Harvard student Mark Zuckerberg creates the social networking site that would become known as
Facebook, but is later sued by two brothers who claimed he stole their idea, and the co-founder
who was later squeezed out of the business.''',
'''The story of a team of African-American women mathematicians who served a vital role in NASA
during the early years of the US space program.''',
'''An astronaut becomes stranded on Mars after his team assume him dead, and must rely on his
ingenuity to find a way to signal to Earth that he is alive.''',
'''Will Hunting, a janitor at M.I.T., has a gift for mathematics, but needs help from a psychologist
to find direction in his life.''',
'''In 1984 East Berlin, an agent of the secret police, conducting surveillance on a writer and his
lover, finds himself becoming increasingly absorbed by their lives.''',
]
wiki_head = "https://upload.wikimedia.org/wikipedia/en/"
youtube_head = "https://youtu.be/"
wiki_suffixes = [
"e/e3/The_boat_that_rocked_poster.jpg",
"7/7a/Social_network_film_poster.jpg",
'4/4f/The_official_poster_for_the_film_Hidden_Figures%2C_2016.jpg',
'thumb/c/cd/The_Martian_film_poster.jpg/220px-The_Martian_film_poster.jpg',
'thumb/b/b8/Good_Will_Hunting_theatrical_poster.jpg/220px-Good_Will_Hunting_theatrical_poster.jpg',
'9/9f/Leben_der_anderen.jpg'
]
youtube_suffixes = [
"qX1SSiFWF-s", "lB95KLmpLR4", 'RK8xHq6dfAo', 'ej3ioOneTy8', 'PaZVjZEFkRs',
'FppW5ml4vdw'
]
imdbs = ["7.4", "7.7", "7.9", "8", "8.3", "8.5"]
posters = [wiki_head + wiki_suffix for wiki_suffix in wiki_suffixes]
trailers = [
youtube_head + youtube_suffix for youtube_suffix in youtube_suffixes
]
movies = []
for n in range(len(titles)):
movies.append(
media.Movie(titles[n], storylines[n], posters[n], trailers[n], imdbs[
n]))
print media.Movie.__doc__
fresh_tomatoes.open_movies_page(movies)
| [
"123456"
] | 123456 |
34e2afbf4f41a4aff51f96b7411be3d80992143d | 95495baeb47fd40b9a7ecb372b79d3847aa7a139 | /swagger_client/models/i_ospfv3_log_adjacency_changes.py | 7ded1a2c2292e58b760cb914f7d1efd4e458559b | [] | no_license | pt1988/fmc-api | b1d8ff110e12c13aa94d737f3fae9174578b019c | 075f229585fcf9bd9486600200ff9efea5371912 | refs/heads/main | 2023-01-07T09:22:07.685524 | 2020-10-30T03:21:24 | 2020-10-30T03:21:24 | 308,226,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,614 | py | # coding: utf-8
"""
Cisco Firepower Management Center Open API Specification
**Specifies the REST URLs and methods supported in the Cisco Firepower Management Center API. Refer to the version specific [REST API Quick Start Guide](https://www.cisco.com/c/en/us/support/security/defense-center/products-programming-reference-guides-list.html) for additional information.** # noqa: E501
OpenAPI spec version: 1.0.0
Contact: tac@cisco.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class IOspfv3LogAdjacencyChanges(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    # Model for the OSPFv3 "log adjacency changes" setting: a single
    # boolean flag controlling whether detailed adjacency-change
    # messages are logged by the FMC-managed device.
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'include_details': 'bool'
    }

    # Maps Python attribute names to the JSON keys used on the wire.
    attribute_map = {
        'include_details': 'includeDetails'
    }

    def __init__(self, include_details=None):  # noqa: E501
        """IOspfv3LogAdjacencyChanges - a model defined in Swagger"""  # noqa: E501
        self._include_details = None
        self.discriminator = None
        # Only assign through the property setter when a value was given,
        # so an omitted field stays None and is skipped on serialization.
        if include_details is not None:
            self.include_details = include_details

    @property
    def include_details(self):
        """Gets the include_details of this IOspfv3LogAdjacencyChanges.  # noqa: E501

        :return: The include_details of this IOspfv3LogAdjacencyChanges.  # noqa: E501
        :rtype: bool
        """
        return self._include_details

    @include_details.setter
    def include_details(self, include_details):
        """Sets the include_details of this IOspfv3LogAdjacencyChanges.

        :param include_details: The include_details of this IOspfv3LogAdjacencyChanges.  # noqa: E501
        :type: bool
        """
        self._include_details = include_details

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models, lists, and dicts by calling
        # to_dict() on any value that provides it.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generated models that subclass dict also merge their own items.
        if issubclass(IOspfv3LogAdjacencyChanges, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, IOspfv3LogAdjacencyChanges):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"pt1988@gmail.com"
] | pt1988@gmail.com |
0e2f843c6f77029a11b47217263877d7bcd0f437 | aeea7889a986e23ababbfc470e5fa97a4982bca0 | /devel/lib/python2.7/dist-packages/pal_interaction_msgs/msg/_WebGuiEvent.py | 068b236c1fdeb1d78c4037e05350bf894fd1ee86 | [] | no_license | robstolarz/sturdy-broccoli | 834798751985a0e77c8791859d9d5a8398da0416 | 2e4ae8f1966f01cab4938b8c5b42e3cfd1d9370a | refs/heads/master | 2021-01-20T07:09:07.867184 | 2017-05-15T14:10:56 | 2017-05-15T14:10:56 | 89,967,465 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,214 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from pal_interaction_msgs/WebGuiEvent.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class WebGuiEvent(genpy.Message):
  # Auto-generated ROS message class; fields mirror
  # pal_interaction_msgs/WebGuiEvent.msg. Do not hand-edit the
  # serialization code: the wire format (little-endian uint32 length
  # prefix + UTF-8 bytes per string) must match the .msg definition.
  _md5sum = "bd4a90be174b9e14b06cf397c1359fb1"
  _type = "pal_interaction_msgs/WebGuiEvent"
  _has_header = False  # flag to mark the presence of a Header object
  _full_text = """# message used by rb_flango
string name
# Expected contents:
# goTo
# setLanguage
string arg
"""
  __slots__ = ['name','arg']
  _slot_types = ['string','string']

  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes.  You cannot mix in-order arguments and keyword arguments.

    The available fields are:
       name,arg

    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(WebGuiEvent, self).__init__(*args, **kwds)
      #message fields cannot be None, assign default values for those that are
      if self.name is None:
        self.name = ''
      if self.arg is None:
        self.arg = ''
    else:
      self.name = ''
      self.arg = ''

  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types

  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      # Each string field is written as: uint32 byte length, then the
      # UTF-8 encoded bytes (Python 2/3 handled via the python3 flag).
      _x = self.name
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      if python3:
        buff.write(struct.pack('<I%sB'%length, length, *_x))
      else:
        buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self.arg
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      if python3:
        buff.write(struct.pack('<I%sB'%length, length, *_x))
      else:
        buff.write(struct.pack('<I%ss'%length, length, _x))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))

  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    try:
      # Walk the buffer: read a uint32 length, then that many bytes,
      # for each string field in declaration order (name, then arg).
      end = 0
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.name = str[start:end].decode('utf-8')
      else:
        self.name = str[start:end]
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.arg = str[start:end].decode('utf-8')
      else:
        self.arg = str[start:end]
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill

  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    # Identical to serialize(); this message has no array fields, so the
    # numpy module parameter is unused.
    try:
      _x = self.name
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      if python3:
        buff.write(struct.pack('<I%sB'%length, length, *_x))
      else:
        buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self.arg
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      if python3:
        buff.write(struct.pack('<I%sB'%length, length, *_x))
      else:
        buff.write(struct.pack('<I%ss'%length, length, _x))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))

  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    # Identical to deserialize(); no array fields, numpy is unused.
    try:
      end = 0
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.name = str[start:end].decode('utf-8')
      else:
        self.name = str[start:end]
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.arg = str[start:end].decode('utf-8')
      else:
        self.arg = str[start:end]
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill

# Pre-compiled little-endian uint32 Struct shared by the parsers above.
_struct_I = genpy.struct_I
| [
"robert.stolarz@yandex.com"
] | robert.stolarz@yandex.com |
d86ce35082ffe54da97e3fc64900cb94273d31e3 | 01031a3d3a33f7591185049e0e44526d9b852821 | /SLAM/FastSLAM/fast_slam.py | 5fb58f0f6c79cffce9e14f0fb876674a3d575e77 | [
"MIT"
] | permissive | matthewgan/PythonRobotics | 8555e2afe95d09c12c5e18ab4658b8e9e3f6817c | ba926c6307e353dbef0d6ee67f5156ec923dc974 | refs/heads/master | 2021-04-06T03:39:43.702250 | 2018-03-10T18:38:06 | 2018-03-10T18:38:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,420 | py | """
Fast SLAM example
author: Atsushi Sakai (@Atsushi_twi)
"""
import numpy as np
import math
import matplotlib.pyplot as plt
# EKF state covariance
Cx = np.diag([0.5, 0.5, math.radians(30.0)])**2

# Simulation parameters: measurement noise (Qsim) and input noise (Rsim)
# used by the simulator, expressed as squared standard deviations.
Qsim = np.diag([0.2, math.radians(1.0)])**2
Rsim = np.diag([1.0, math.radians(10.0)])**2

DT = 0.1  # time tick [s]
SIM_TIME = 50.0  # simulation time [s]
MAX_RANGE = 20.0  # maximum observation range
M_DIST_TH = 2.0  # Threshold of Mahalanobis distance for data association.
STATE_SIZE = 3  # State size [x,y,yaw]
LM_SIZE = 2  # LM state size [x,y]
N_PARTICLE = 100  # number of particles

show_animation = True


class Particle:
    """One FastSLAM particle: a robot-pose hypothesis together with its
    own per-particle estimate of every landmark position."""

    def __init__(self, N_LM):
        # Importance weight; starts uniform across all particles.
        self.w = 1.0 / N_PARTICLE
        # Pose hypothesis [m, m, rad].
        self.x = 0.0
        self.y = 0.0
        self.yaw = 0.0
        # Landmark map: N_LM rows of [x, y] world coordinates.
        self.lm = np.zeros((N_LM, 2))
def normalize_weight(particles):
    """Normalize importance weights in place so they sum to one.

    Generalized to work for any number of particles (the original
    relied on the module-level N_PARTICLE count).  If the total weight
    has collapsed to zero through numerical underflow, the weights are
    reset to a uniform distribution instead of raising
    ZeroDivisionError.

    :param particles: list of Particle-like objects with a ``w`` attribute
    :return: the same list, with weights normalized in place
    """
    if not particles:
        return particles

    total = sum(p.w for p in particles)
    if total == 0.0:
        # Degenerate case: every weight underflowed; restart uniform.
        uniform = 1.0 / len(particles)
        for p in particles:
            p.w = uniform
    else:
        for p in particles:
            p.w = p.w / total

    return particles
def calc_final_state(particles):
    """Return the weighted-mean state estimate [x, y, yaw]^T over all
    particles (weights are normalized first)."""
    particles = normalize_weight(particles)

    xEst = np.zeros((STATE_SIZE, 1))

    for i in range(N_PARTICLE):
        xEst[0, 0] += particles[i].w * particles[i].x
        xEst[1, 0] += particles[i].w * particles[i].y
        xEst[2, 0] += particles[i].w * particles[i].yaw

    # Keep the averaged heading inside (-pi, pi].
    xEst[2, 0] = pi_2_pi(xEst[2, 0])

    return xEst


def predict_particles(particles, u):
    """Sampling step: propagate every particle through the motion model
    with an independently noise-corrupted copy of control input u."""
    for i in range(N_PARTICLE):
        px = np.zeros((STATE_SIZE, 1))
        px[0, 0] = particles[i].x
        px[1, 0] = particles[i].y
        px[2, 0] = particles[i].yaw
        # NOTE(review): the noise sample is multiplied by the covariance
        # matrix Rsim itself rather than its square root / Cholesky
        # factor -- confirm this scaling is intended.
        ud = u + np.matrix(np.random.randn(1, 2)) * Rsim  # add noise
        px = motion_model(px, ud)
        particles[i].x = px[0, 0]
        particles[i].y = px[1, 0]
        particles[i].yaw = px[2, 0]

    return particles
def add_new_lm(particle, z):
    """Initialize a landmark estimate from a single observation.

    The measurement z = [range, bearing, landmark_id] is projected from
    the particle's pose into world coordinates and stored in the
    particle's landmark table.

    :param particle: Particle whose map receives the new landmark
    :param z: 1x3 measurement row [range, bearing, landmark_id]
    :return: the same particle, updated in place
    """
    dist = z[0, 0]
    bearing = z[0, 1]
    lm_id = int(z[0, 2])

    # Absolute heading from the robot to the landmark.
    heading = particle.yaw + bearing

    particle.lm[lm_id, 0] = particle.x + dist * math.cos(heading)
    particle.lm[lm_id, 1] = particle.y + dist * math.sin(heading)

    return particle
def compute_weight(particle, z):
    """Observation likelihood p(z | particle) for a known landmark.

    Bug fix: the original code subtracted the measurement's polar
    coordinates [range, bearing] directly from the landmark's Cartesian
    estimate [x, y] -- two different spaces.  The innovation is now
    computed in measurement space: the stored landmark estimate is
    projected into the expected (range, bearing) seen from the particle
    pose and compared with the actual measurement.

    :param particle: Particle with pose (x, y, yaw) and landmark table lm
    :param z: 1x3 measurement row [range, bearing, landmark_id]
    :return: Gaussian likelihood of the innovation (float)
    """
    lm_id = int(z[0, 2])
    lm_x = particle.lm[lm_id, 0]
    lm_y = particle.lm[lm_id, 1]

    # Expected measurement of this landmark from the particle's pose.
    dx = lm_x - particle.x
    dy = lm_y - particle.y
    d_pred = math.sqrt(dx ** 2 + dy ** 2)
    b_pred = math.atan2(dy, dx) - particle.yaw

    # Innovation: actual minus predicted, with the bearing component
    # wrapped into (-pi, pi] (atan2(sin, cos) normalizes the angle).
    db = z[0, 1] - b_pred
    db = math.atan2(math.sin(db), math.cos(db))
    dz = np.matrix([[z[0, 0] - d_pred], [db]])

    # Unit innovation covariance, as in the original code.
    S = np.eye(2)

    num = math.exp(-0.5 * dz.T * np.linalg.inv(S) * dz)
    den = 2.0 * math.pi * math.sqrt(np.linalg.det(S))

    w = num / den

    return w
def update_with_observation(particles, z):
    """Update every particle with the observation batch z.

    Each row of z is [range, bearing, landmark_id].  A landmark whose
    stored x-estimate is still near zero is treated as unseen and
    initialized; otherwise the particle's weight is multiplied by the
    observation likelihood.
    """
    for iz in range(len(z[:, 0])):

        lmid = int(z[iz, 2])

        for ip in range(N_PARTICLE):
            # new landmark
            # NOTE(review): "|lm x| <= 0.1" is a heuristic marker for an
            # uninitialized landmark; a landmark genuinely located near
            # x = 0 would be re-initialized on every sighting.
            if abs(particles[ip].lm[lmid, 0]) <= 0.1:
                particles[ip] = add_new_lm(particles[ip], z[iz, :])
            # known landmark
            else:
                w = compute_weight(particles[ip], z[iz, :])  # w = p(z_k | x_k)
                particles[ip].w = particles[ip].w * w
                # particles(i)= feature_update(particles(i), zf, idf, R)

    return particles


def fast_slam(particles, PEst, u, z):
    """One FastSLAM iteration: predict all particles with control u,
    update them with observations z, and return the weighted-mean state
    estimate together with the (unchanged) covariance PEst."""
    # Predict
    particles = predict_particles(particles, u)

    # Observation
    particles = update_with_observation(particles, z)

    xEst = calc_final_state(particles)

    return xEst, PEst
def calc_input():
    """Constant control command used throughout the simulation:
    1.0 m/s forward velocity and 0.1 rad/s yaw rate, as a 2x1 vector."""
    velocity = 1.0  # [m/s]
    yaw_rate = 0.1  # [rad/s]
    return np.matrix([velocity, yaw_rate]).T
def observation(xTrue, xd, u, RFID):
    """Simulate one time step.

    Moves the true pose with control u, produces a noisy
    (range, bearing, id) measurement for every RFID landmark within
    MAX_RANGE, and advances the dead-reckoning pose with a
    noise-corrupted copy of the input.

    :return: (xTrue, z, xd, ud) -- new true pose, measurement matrix,
        dead-reckoning pose, and noisy input.
    """
    xTrue = motion_model(xTrue, u)

    # Measurements accumulate as rows of a 0x3-seeded matrix.
    z = np.matrix(np.zeros((0, 3)))

    for i in range(len(RFID[:, 0])):

        dx = RFID[i, 0] - xTrue[0, 0]
        dy = RFID[i, 1] - xTrue[1, 0]
        d = math.sqrt(dx**2 + dy**2)
        angle = pi_2_pi(math.atan2(dy, dx))
        if d <= MAX_RANGE:
            dn = d + np.random.randn() * Qsim[0, 0]  # add noise
            anglen = angle + np.random.randn() * Qsim[1, 1]  # add noise
            zi = np.matrix([dn, anglen, i])
            z = np.vstack((z, zi))

    # add noise to input
    ud1 = u[0, 0] + np.random.randn() * Rsim[0, 0]
    ud2 = u[1, 0] + np.random.randn() * Rsim[1, 1]
    ud = np.matrix([ud1, ud2]).T

    xd = motion_model(xd, ud)

    return xTrue, z, xd, ud
def motion_model(x, u):
    """Advance pose x = [x, y, yaw]^T by control u = [v, yaw_rate]^T over
    one time step DT (unicycle model).

    :param x: 3x1 pose vector
    :param u: 2x1 control vector
    :return: 3x1 updated pose vector
    """
    yaw = x[2, 0]

    # State transition (identity) and control matrices.
    F = np.matrix([[1.0, 0, 0],
                   [0, 1.0, 0],
                   [0, 0, 1.0]])
    B = np.matrix([[DT * math.cos(yaw), 0],
                   [DT * math.sin(yaw), 0],
                   [0.0, DT]])

    return F * x + B * u
def calc_n_LM(x):
    """Number of landmarks held in the augmented state vector x."""
    lm_entries = len(x) - STATE_SIZE
    return int(lm_entries / LM_SIZE)
def calc_LM_Pos(x, z):
    """Project a (range, bearing) measurement from robot pose x into
    world coordinates.

    :param x: 3x1 pose vector [x, y, yaw]^T
    :param z: measurement row whose first two entries are range, bearing
    :return: 2x1 array with the landmark's world position
    """
    rng = z[0, 0]
    heading = x[2, 0] + z[0, 1]

    zp = np.zeros((2, 1))
    zp[0, 0] = x[0, 0] + rng * math.cos(heading)
    zp[1, 0] = x[1, 0] + rng * math.sin(heading)

    return zp
def get_LM_Pos_from_state(x, ind):
    """Slice landmark ind's [x, y] estimate out of the augmented state
    vector x (landmarks are stored after the robot pose)."""
    lm = x[STATE_SIZE + LM_SIZE * ind: STATE_SIZE + LM_SIZE * (ind + 1), :]

    return lm


def search_correspond_LM_ID(xAug, PAug, zi):
    """
    Landmark association with Nearest Neighbor
    """
    # NOTE(review): the Mahalanobis-distance computation in the loop is
    # commented out, so mdist only ever holds the new-landmark threshold
    # and every observation is associated as a new landmark (stub).
    nLM = calc_n_LM(xAug)

    mdist = []

    for i in range(nLM):
        # lm = get_LM_Pos_from_state(xAug, i)
        # # y, S, H = calc_innovation(lm, xAug, PAug, zi, i)
        # mdist.append(y.T * np.linalg.inv(S) * y)
        pass

    mdist.append(M_DIST_TH)  # new landmark

    minid = mdist.index(min(mdist))

    return minid
def pi_2_pi(angle):
    """Wrap an angle in radians into the interval (-pi, pi]."""
    two_pi = 2.0 * math.pi

    while angle > math.pi:
        angle -= two_pi
    while angle < -math.pi:
        angle += two_pi

    return angle
def main():
    """Run the FastSLAM simulation loop and (optionally) animate the
    true, dead-reckoning, and estimated trajectories."""
    print(__file__ + " start!!")

    time = 0.0

    # RFID positions [x, y]
    RFID = np.array([[10.0, -2.0],
                     [15.0, 10.0],
                     [3.0, 15.0],
                     [-5.0, 20.0]])
    N_LM = RFID.shape[0]

    # State Vector [x y yaw v]'
    xEst = np.matrix(np.zeros((STATE_SIZE, 1)))
    xTrue = np.matrix(np.zeros((STATE_SIZE, 1)))
    PEst = np.eye(STATE_SIZE)

    xDR = np.matrix(np.zeros((STATE_SIZE, 1)))  # Dead reckoning

    # history of estimated / dead-reckoning / true trajectories
    hxEst = xEst
    hxTrue = xTrue
    hxDR = xTrue

    particles = [Particle(N_LM) for i in range(N_PARTICLE)]

    while SIM_TIME >= time:
        time += DT
        u = calc_input()

        xTrue, z, xDR, ud = observation(xTrue, xDR, u, RFID)

        xEst, PEst = fast_slam(particles, PEst, ud, z)

        x_state = xEst[0:STATE_SIZE]

        # store data history
        hxEst = np.hstack((hxEst, x_state))
        hxDR = np.hstack((hxDR, xDR))
        hxTrue = np.hstack((hxTrue, xTrue))

        if show_animation:
            plt.cla()

            plt.plot(RFID[:, 0], RFID[:, 1], "*k")
            plt.plot(xEst[0], xEst[1], "xr")

            for i in range(N_PARTICLE):
                plt.plot(particles[i].x, particles[i].y, ".r")

            # plot landmark
            for i in range(calc_n_LM(xEst)):
                plt.plot(xEst[STATE_SIZE + i * 2],
                         xEst[STATE_SIZE + i * 2 + 1], "xg")

            plt.plot(np.array(hxTrue[0, :]).flatten(),
                     np.array(hxTrue[1, :]).flatten(), "-b")
            plt.plot(np.array(hxDR[0, :]).flatten(),
                     np.array(hxDR[1, :]).flatten(), "-k")
            plt.plot(np.array(hxEst[0, :]).flatten(),
                     np.array(hxEst[1, :]).flatten(), "-r")
            plt.axis("equal")
            plt.grid(True)
            plt.pause(0.001)


if __name__ == '__main__':
    main()
| [
"asakai.amsl+github@gmail.com"
] | asakai.amsl+github@gmail.com |
1cc5122e5c66d396e0dc0b524d9525bc39c29fb8 | 9f7c106d50681b394d822fbdc5e3ad25f04d927c | /week6_nissi_miika/week6_ass10_nissi_miika.py | ffc57d3b785e28d30b956c0c90436868710caa64 | [] | no_license | miikanissi/python_course_summer_2020 | edf032b1d9815dfa6e0b5f7c902f7b469117c04f | 3969288b969b3db8f9d7f2fdb67905f13d4969fa | refs/heads/master | 2022-12-02T09:33:42.625374 | 2020-08-24T17:38:59 | 2020-08-24T17:38:59 | 273,909,320 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | def search(array, n):
for i in array:
if i == n:
return True
return False
# Simple manual check of the search helper: 19 is present, 20 is not.
arr = [23,4,89,19,0,700,30]
print("Number 19 found: ", search(arr, 19))
print("Number 20 found: ", search(arr, 20))
| [
"unconfigured@null.spigotmc.org"
] | unconfigured@null.spigotmc.org |
52fe8f06f0857f20301770af0233c347deb1dcc6 | c50e7eb190802d7849c0d0cea02fb4d2f0021777 | /src/reservation/azext_reservation/aaz/latest/reservations/reservation/_archive.py | 734b5fe8a45e886531655e01256e5a4e690f51e7 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | Azure/azure-cli-extensions | c1615b19930bba7166c282918f166cd40ff6609c | b8c2cf97e991adf0c0a207d810316b8f4686dc29 | refs/heads/main | 2023-08-24T12:40:15.528432 | 2023-08-24T09:17:25 | 2023-08-24T09:17:25 | 106,580,024 | 336 | 1,226 | MIT | 2023-09-14T10:48:57 | 2017-10-11T16:27:31 | Python | UTF-8 | Python | false | false | 3,904 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
    "reservations reservation archive",
)
class Archive(AAZCommand):
    """Archiving a `Reservation` which is in cancelled/expired state and move it to `Archived` state.

    :example: Archiving a reservation
        az reservations reservation archive --reservation-order-id 40000000-aaaa-bbbb-cccc-20000000000 --reservation-id 50000000-aaaa-bbbb-cccc-200000000000
    """

    # Metadata consumed by the aaz framework: target API version and the
    # ARM resource/URL template this command operates on.
    _aaz_info = {
        "version": "2022-11-01",
        "resources": [
            ["mgmt-plane", "/providers/microsoft.capacity/reservationorders/{}/reservations/{}/archive", "2022-11-01"],
        ]
    }

    def _handler(self, command_args):
        # Parse arguments via the base class, run the HTTP operation, and
        # return None (the archive API has no response body).
        super()._handler(command_args)
        self._execute_operations()
        return None

    _args_schema = None

    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        # Build the argument schema once and cache it on the class.
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)

        # define Arg Group ""

        _args_schema = cls._args_schema
        _args_schema.reservation_id = AAZStrArg(
            options=["--reservation-id"],
            help="Id of the Reservation Item",
            required=True,
        )
        _args_schema.reservation_order_id = AAZStrArg(
            options=["--reservation-order-id"],
            help="Order Id of the reservation",
            required=True,
        )
        return cls._args_schema

    def _execute_operations(self):
        self.pre_operations()
        self.ReservationArchive(ctx=self.ctx)()
        self.post_operations()

    # Extension hooks that overrides may register additional logic on.
    @register_callback
    def pre_operations(self):
        pass

    @register_callback
    def post_operations(self):
        pass

    class ReservationArchive(AAZHttpOperation):
        # HTTP operation: POST .../archive; a 200 response carries no body.
        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            if session.http_response.status_code in [200]:
                return self.on_200(session)

            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/providers/Microsoft.Capacity/reservationOrders/{reservationOrderId}/reservations/{reservationId}/archive",
                **self.url_parameters
            )

        @property
        def method(self):
            return "POST"

        @property
        def error_format(self):
            return "ODataV4Format"

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "reservationId", self.ctx.args.reservation_id,
                    required=True,
                ),
                **self.serialize_url_param(
                    "reservationOrderId", self.ctx.args.reservation_order_id,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2022-11-01",
                    required=True,
                ),
            }
            return parameters

        def on_200(self, session):
            # Success: the archive API returns no payload to deserialize.
            pass


class _ArchiveHelper:
    """Helper class for Archive"""


__all__ = ["Archive"]
| [
"noreply@github.com"
] | Azure.noreply@github.com |
e70678a04a2923ec5b42793b99a701886f815120 | 023763d9f86116381f5765c51fb8b403e8eef527 | /Other/M-SOLUTIONS プロコンオープン 2020/m_solutions2020_c.py | a2a8e2a8ef4bdc030af9099c1af7be71984e6691 | [] | no_license | Hilary02/atcoder | d45589682159c0f838561fc7d0bd25f0828e578b | 879c74f3acc7befce75abd10abf1ab43967fc3c7 | refs/heads/master | 2021-07-18T11:34:22.702502 | 2021-07-11T09:04:12 | 2021-07-11T09:04:12 | 144,648,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 165 | py | n, k = [int(w) for w in input().split()]
la = [int(w) for w in input().split()]
for i in range(k, n):
cond = la[i] > la[i-k]
print("Yes" if cond else "No")
| [
"c011605154@edu.teu.ac.jp"
] | c011605154@edu.teu.ac.jp |
f8392b31b550f3e9534ab9a3e39f8baed6780ccb | bd5c1f86971a068e9a4ea57459653d649ea4c50e | /tests/unit/test_util.py | 18973c6f16a056c938ac6e25dcb06fc61da6117c | [
"MIT"
] | permissive | monoflo/bloop | 553372c7155de386afd098e3c91435186064a5d4 | c476298e5a40decf9fdf2ed50df74be8f91fdffd | refs/heads/master | 2020-04-06T12:54:19.184591 | 2018-11-14T22:06:01 | 2018-11-14T22:06:01 | 157,475,484 | 0 | 0 | MIT | 2018-11-14T02:04:44 | 2018-11-14T02:04:44 | null | UTF-8 | Python | false | false | 3,510 | py | import collections
import gc
import pytest
from bloop.models import BaseModel, Column
from bloop.types import Integer
from bloop.util import (
Sentinel,
WeakDefaultDictionary,
index,
ordered,
walk_subclasses,
)
def test_index():
    """Index by each object's value for an attribute"""
    class Person:
        def __init__(self, name):
            self.name = name
    p1, p2, p3 = Person("foo"), Person("bar"), Person("baz")
    assert index([p1, p2, p3], "name") == {
        "foo": p1,
        "bar": p2,
        "baz": p3
    }


@pytest.mark.parametrize("obj", [None, object(), 2, False, "abc"])
def test_ordered_basic_objects(obj):
    """Things that don't need to be unpacked or flattened for comparison"""
    # Atoms must come back unchanged -- identity, not just equality.
    assert ordered(obj) is obj


@pytest.mark.parametrize("it", [
    iter(list("bac")),
    ["b", "c", "a"],
    ("c", "a", "b"),
    (x for x in "cba"),
    {"a", "c", "b"}
])
def test_ordered_iterable(it):
    """Any non-mapping iterable is sorted, even if it's consumable"""
    expected = ["a", "b", "c"]
    assert ordered(it) == expected


@pytest.mark.parametrize("mapping", [
    {"b": True, "a": "zebra", "c": None},
    collections.OrderedDict([("c", None), ("b", True), ("a", "zebra")])
])
def test_ordered_mapping(mapping):
    """Mappings are flattened into (key, value) tuples and then those tuples are sorted"""
    expected = [
        ("a", "zebra"),
        ("b", True),
        ("c", None)
    ]
    assert ordered(mapping) == expected


@pytest.mark.parametrize("obj, expected", [
    # mapping int -> set(str)
    ({3: {"a", "b"}, 2: {"c", "b"}, 1: {"a", "c"}}, [(1, ["a", "c"]), (2, ["b", "c"]), (3, ["a", "b"])]),
    # mapping str -> list(int)
    ({"b": [1, 2], "a": [3, 2], "c": [1, 3]}, [("a", [2, 3]), ("b", [1, 2]), ("c", [1, 3])]),
    # list(set(bool))
    ([{False}, {True}], [[False], [True]]),
])
def test_ordered_recursion(obj, expected):
    """Mappings and iterables inside each other are sorted and flattened"""
    assert ordered(obj) == expected
def test_walk_subclasses():
    """walk_subclasses yields each (transitive) subclass exactly once,
    and never the root class itself or unrelated classes."""
    class A:
        pass

    class B:  # Not included
        pass

    class C(A):
        pass

    class D(A):
        pass

    class E(C, A):  # would be visited twice without dedupe
        pass

    class F(D, A):  # would be visited twice without dedupe
        pass

    # list instead of set ensures we don't false succeed on duplicates
    subclasses = sorted(walk_subclasses(A), key=lambda c: c.__name__)
    assert subclasses == [C, D, E, F]


def test_sentinel_uniqueness():
    """Sentinel lookup is case-insensitive and returns the same object."""
    sentinel = Sentinel("name")
    same_sentinel = Sentinel("NAME")
    assert sentinel is same_sentinel


def test_sentinel_repr():
    foo = Sentinel("foo")
    assert repr(foo) == "<Sentinel[foo]>"


def test_weakref_default_dict():
    """Provides defaultdict behavior for a WeakKeyDictionary"""

    class MyModel(BaseModel):
        id = Column(Integer, hash_key=True)
        data = Column(Integer)

    def new(i):
        obj = MyModel(id=i, data=2 * i)
        return obj

    weak_dict = WeakDefaultDictionary(lambda: {"foo": "bar"})
    n_objs = 10

    objs = [new(i) for i in range(n_objs)]
    for obj in objs:
        # default_factory is called
        assert weak_dict[obj] == {"foo": "bar"}
    # don't keep a reference to the last obj, throws off the count below
    del obj

    # Dropping each strong reference should let the corresponding entry
    # disappear from the weak dictionary after a collection pass.
    calls = 0
    while weak_dict:
        del objs[0]
        gc.collect()
        calls += 1
        assert len(weak_dict) == len(objs)
    assert calls == n_objs
| [
"joe.mcross@gmail.com"
] | joe.mcross@gmail.com |
321af11c680482b013a293d81093854eec9201fc | 4680b7f858232806ea15bf2464ec4b6401d93cf0 | /src/joins/models.py | e62ea42028a7e3943d20ac673c2baa3bcb0d8b56 | [] | no_license | Tushant/socialSharingCampaign | 69017e602648ea8ef6e02092668039d61844b61f | 96dc8176be1cf64e9ef4ec6a305c61666612be20 | refs/heads/master | 2020-04-02T06:45:17.103905 | 2016-07-18T02:09:51 | 2016-07-18T02:09:51 | 63,562,915 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,079 | py | from django.db import models
# Create your models here.
class Join(models.Model):
    """A campaign signup: an email address plus optional referral links
    to the signup that referred it."""
    email = models.EmailField()
    # The Join record that referred this signup (self-referential).
    # NOTE(review): ForeignKey without on_delete is only valid on
    # Django < 2.0 -- confirm the project's Django version.
    friend = models.ForeignKey("self", related_name='referral',\
        null=True, blank=True)
    # Referral code handed out to this signup; must be unique.
    ref_id = models.CharField(max_length=120, default='ABC', unique=True)
    # NOTE(review): purpose of count_added is unclear from this file --
    # presumably the signup credited for bringing this one in; verify.
    count_added = models.ForeignKey("self",null=True,related_name='count',blank=True)
    ip_address = models.CharField(max_length=120, default='ABC')
    timestamp = models.DateTimeField(auto_now_add = True, auto_now=False)
    updated = models.DateTimeField(auto_now_add = False, auto_now=True)

    def __unicode__(self):
        # Python 2 string representation (this codebase predates __str__).
        return "%s" %(self.email)

    class Meta:
        # The same email may appear at most once per referral code.
        unique_together = ("email", "ref_id",)


# class JoinFriends(models.Model):
# 	email = models.OneToOneField(Join, related_name="Sharer")
# 	friends = models.ManyToManyField(Join, related_name="Friend", \
# 			null=True, blank=True)
# 	emailall = models.ForeignKey(Join, related_name='emailall')
# 	def __unicode__(self):
# 		print "friends are ", self.friends.all()
# 		print self.emailall
# 		print self.email
# 		return self.email.email
"programmertushant@gmail.com"
] | programmertushant@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.