blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0e5b73246e9bf01a3fa6831a2fc46a5a4f921ff8 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_048/ch78_2020_04_11_03_56_59_345262.py | bb5a6d95248403b0737f835dd5b64ee23d9c67f7 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 583 | py | import math
# Read (name, acceleration) pairs from the user until 'sair' ("quit") is typed.
diciok=[]  # runner names (dict keys)
diciov=[]  # accelerations (dict values)
continua=True
while continua:
    r=input('Digite um nome')
    diciok.append(r)  # NOTE: 'sair' is appended too, but zip() below drops it (no paired value)
    if r=='sair':
        break
    u=input('Digite uma aceleracao')
    diciov.append(int(u))
dicio=dict(zip(diciok,diciov))  # zip truncates to the shorter list, discarding the trailing 'sair'
def calcula_tempo(dicio):
    """Given {name: acceleration}, compute each finishing time and find the winner.

    NOTE(review): t = sqrt(200/a) corresponds to s = a*t^2/2 with s = 100 m --
    confirm the intended race distance.
    """
    k=dicio.keys()
    v=list(dicio.values())
    d=[0]*len(v)  # finishing time for each runner, same order as v
    for i in range(len(v)):
        d[i]=math.sqrt(200/v[i])
    n=dict(zip(k, d))  # {name: time}
    j=min(n.values())  # best (smallest) time
for chave,valor in n.items():
if valor==j:
return print('O vencedor é {0} com tempo de conclusão de {1} s'.format(chave,j)) | [
"you@example.com"
] | you@example.com |
71d9b837299e9da2d6851ac2153b22150b38ad1f | 7bededcada9271d92f34da6dae7088f3faf61c02 | /pypureclient/flasharray/FA_2_17/models/remote_protection_group_snapshot_get_response.py | f3f01b74864fe85980624ee3cc7874ab70a375f9 | [
"BSD-2-Clause"
] | permissive | PureStorage-OpenConnect/py-pure-client | a5348c6a153f8c809d6e3cf734d95d6946c5f659 | 7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e | refs/heads/master | 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 | BSD-2-Clause | 2023-09-08T09:08:30 | 2018-12-04T17:02:51 | Python | UTF-8 | Python | false | false | 5,788 | py | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.17
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_17 import models
class RemoteProtectionGroupSnapshotGetResponse(object):
    """Paged GET response wrapper for remote protection group snapshots.

    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps each model attribute to the (string) name of its declared type.
    swagger_types = {
        'more_items_remaining': 'bool',
        'total_item_count': 'int',
        'continuation_token': 'str',
        'items': 'list[RemoteProtectionGroupSnapshot]'
    }
    # Maps each model attribute to its JSON key in the REST payload
    # (identical here, but kept for codegen symmetry with other models).
    attribute_map = {
        'more_items_remaining': 'more_items_remaining',
        'total_item_count': 'total_item_count',
        'continuation_token': 'continuation_token',
        'items': 'items'
    }
    # No constructor arguments are mandatory for this response model.
    required_args = {
    }
    def __init__(
        self,
        more_items_remaining=None,  # type: bool
        total_item_count=None,  # type: int
        continuation_token=None,  # type: str
        items=None,  # type: List[models.RemoteProtectionGroupSnapshot]
    ):
        """
        Keyword args:
            more_items_remaining (bool): Returns a value of `true` if subsequent items can be retrieved.
            total_item_count (int): The total number of records after applying all filter query parameters. The `total_item_count` will be calculated if and only if the corresponding query parameter `total_item_count` is set to `true`. If this query parameter is not set or set to `false`, a value of `null` will be returned.
            continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the continuation token to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The continuation token is generated if the limit is less than the remaining number of items, and the default sort is used (no sort is specified).
            items (list[RemoteProtectionGroupSnapshot]): Returns a list of all items after filtering. The values are displayed for each name where meaningful.
        """
        # Only assign attributes that were actually supplied, so "unset" fields
        # are distinguishable from fields explicitly set to a falsy value.
        if more_items_remaining is not None:
            self.more_items_remaining = more_items_remaining
        if total_item_count is not None:
            self.total_item_count = total_item_count
        if continuation_token is not None:
            self.continuation_token = continuation_token
        if items is not None:
            self.items = items
    def __setattr__(self, key, value):
        # Reject assignment to names that are not declared model attributes.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `RemoteProtectionGroupSnapshotGetResponse`".format(key))
        self.__dict__[key] = value
    def __getattribute__(self, item):
        value = object.__getattribute__(self, item)
        if isinstance(value, Property):
            # A class-level Property placeholder means the field was never set
            # on this instance; make it look like the attribute does not exist.
            raise AttributeError
        else:
            return value
    def __getitem__(self, key):
        # dict-style read access, restricted to declared attributes
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `RemoteProtectionGroupSnapshotGetResponse`".format(key))
        return object.__getattribute__(self, key)
    def __setitem__(self, key, value):
        # dict-style write access, restricted to declared attributes
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `RemoteProtectionGroupSnapshotGetResponse`".format(key))
        object.__setattr__(self, key, value)
    def __delitem__(self, key):
        # dict-style deletion, restricted to declared attributes
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `RemoteProtectionGroupSnapshotGetResponse`".format(key))
        object.__delattr__(self, key)
    def keys(self):
        # Expose declared attribute names, enabling dict(model) / ** unpacking.
        return self.attribute_map.keys()
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            if hasattr(self, attr):
                value = getattr(self, attr)
                if isinstance(value, list):
                    # Recursively serialize list elements that are models.
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    # Recursively serialize dict values that are models.
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        # Codegen boilerplate: merge inherited dict entries (no-op here, since
        # this class does not subclass dict).
        if issubclass(RemoteProtectionGroupSnapshotGetResponse, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, RemoteProtectionGroupSnapshotGetResponse):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"noreply@github.com"
] | PureStorage-OpenConnect.noreply@github.com |
b42d9964bbce3a6b6dcdb1e6b5a49ec0faa8fdfa | 8218813b16d2ea2b39b6bd599a0c45d698389c32 | /ansible/roles/lib_zabbix/build/ansible/zbx_maintenance.py | 5a1ced7f99961bd72389d822d28960f49d43144d | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | zgalor/openshift-tools | 05f500bea6f7beb6b62e3c9f406751ea913f2639 | 63c8d0aea8f237939f290dc6e91530d9767f1945 | refs/heads/stg | 2021-01-12T16:24:59.753014 | 2016-10-25T22:11:06 | 2016-10-25T22:11:06 | 71,991,757 | 0 | 0 | null | 2016-10-26T10:18:56 | 2016-10-26T10:18:56 | null | UTF-8 | Python | false | false | 1,361 | py | # pylint: skip-file
def main():
    '''Ansible module entry point: create, remove, or list a Zabbix maintenance window.'''
    module = AnsibleModule(
        argument_spec=dict(
            zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
            # Credentials fall back to environment variables when not passed in.
            zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
            zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
            zbx_debug=dict(default=False, type='bool'),
            zbx_sslverify=dict(default=False, type='bool'),
            state=dict(default='present', choices=['present', 'absent', 'list'], type='str'),
            hosts=dict(default=None, type='list'),
            hostgroups=dict(default=None, type='list'),
            name=dict(default=None, type='str'),
            description=dict(default=None, type='str'),
            # Defaults to "now" (evaluated once, at module load).
            start_time=dict(default=int(time.time()), type='int'),
            duration=dict(default=60, type='int'),
            data_collection=dict(default=True, type='bool'),
        ),
        supports_check_mode=False
    )
    rval = ZbxMaintenance.run_ansible(module.params)
    # exit_json terminates the process with the result payload.
    module.exit_json(**rval)
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# Import Ansible module snippets; the wildcard import is required by Ansible's
# module machinery and must happen before main() runs.
if __name__ == '__main__':
    from ansible.module_utils.basic import *
    main()
| [
"kwoodson@redhat.com"
] | kwoodson@redhat.com |
f6912355091fa1ab626656b327d3abb7a2ded441 | 11d265eba2ced9de43c339e4014c779b521320cd | /accounts/migrations/0003_auto_20200423_2251.py | 9e85e74a27235d2afa6d5ee07d8608f4d3d364ed | [] | no_license | Sloshpit/budget_old | d9271de625cd7e3aa66ccbec501b005e50cd2812 | a5603996b026542adb3bc8c578c03bcb843bea01 | refs/heads/master | 2022-04-23T08:42:43.377827 | 2020-04-25T14:40:39 | 2020-04-25T14:40:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | # Generated by Django 3.0.5 on 2020-04-24 02:51
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: renames Account.tr to Account.transaction.
    dependencies = [
        ('accounts', '0002_auto_20200423_2248'),
    ]
    operations = [
        migrations.RenameField(
            model_name='account',
            old_name='tr',
            new_name='transaction',
        ),
    ]
| [
"neel.maheshwari@gmail.com"
] | neel.maheshwari@gmail.com |
30f90e427b984555b702c00242861da4f22e2aa2 | 205be8d429df36e27cdfc048bfca9212c5a62a87 | /ward/views.py | 616ad0dc9cc222cafec6a2601e1b9e59eda027dd | [] | no_license | KennyChrisUmurundi/HOsto | 16c8f926282fc48c981532447f1685fbbc2b457c | 33fa31524a08934f3deb8f622a1b1554d8ef1af4 | refs/heads/master | 2022-04-01T02:42:39.146227 | 2020-01-07T11:44:08 | 2020-01-07T11:44:08 | 193,458,881 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,425 | py | from django.shortcuts import render, get_object_or_404, redirect
from django.http import HttpResponse
from reception.models import Patient, Appointment
from django.db.models import Sum
from django.contrib.auth.decorators import login_required
from .models import PatientStatus, PatientReport
from . import forms as ward_forms
import hashlib
# Create your views here.
@login_required(login_url='/')
def ward_home(request):
    """Ward landing page: list patients whose code contains the posted search term."""
    # NOTE(review): on a plain GET the default is the boolean False, so the
    # filter searches for the literal text "False" -- confirm whether '' was
    # intended before changing it.
    search_term = request.POST.get('PatientCode', False)
    matching_patients = Patient.objects.filter(code__icontains=search_term)
    return render(request, 'ward/ward_home.html', {'PatientByCode': matching_patients})
@login_required(login_url='/')
def inpatient_info(request, code):
    """Detail page for one in-patient: record, reception appointments and their total price."""
    appointments = Appointment.objects.filter(patient=code)
    # aggregate() returns {'price__sum': None} when there are no appointments.
    total_price = appointments.aggregate(Sum('price'))['price__sum']
    return render(request, 'ward/inpatient_info.html', {
        'CurrentPatient': Patient.objects.filter(code=code),
        'reception': appointments,
        'RecTotalPrice': total_price,
    })
@login_required(login_url='/')
def patient_status_update(request, code, id):
    # NOTE(review): dead code -- immediately shadowed by the second
    # `patient_status_update` definition below, so this version (and its extra
    # `id` argument) is never reachable. Candidate for removal.
    return render(request, 'ward/patientStatus.html')
@login_required(login_url='/')
def patient_status_update(request, code):
    """Edit the PatientStatus attached to the patient identified by `code`.

    On a valid POST the stored status value is preserved (re-copied from the
    existing record before saving) and the user is redirected according to
    their role; otherwise the bound/unbound form is re-rendered.
    """
    app_instance = get_object_or_404(Patient, code=code)
    # NOTE(review): this form is rebuilt in both branches below, so this first
    # construction is redundant.
    update_patientStatus_form = ward_forms.PatientStatusUpdate(request.POST or None, instance=app_instance.patientstatus)
    if request.method == 'POST':
        update_patientStatus_form = ward_forms.PatientStatusUpdate(request.POST or None, instance=app_instance.patientstatus)
        if update_patientStatus_form.is_valid():
            u_patientStatus = update_patientStatus_form.save(commit=False)
            # Keep the previously stored status; the form must not change it.
            u_patientStatus.status = app_instance.patientstatus.status
            update_patientStatus_form.save()
            print('success')  # debug leftover
            if request.user.role.role == 'is_doctor':
                return redirect('doctor:patient', code=app_instance.code)
            elif request.user.role.role == 'is_nurse':
                return redirect('ward:nurse-home')
        else:
            print('not valid')  # debug leftover
    else:
        update_patientStatus_form = ward_forms.PatientStatusUpdate(instance=app_instance.patientstatus)
    context = {
        'update_patientStatus_form' : update_patientStatus_form,
        'patient' : Patient.objects.filter(code=code),
    }
    return render(request, 'ward/patientStatus.html', context)
@login_required(login_url='/')
def nurse_home(request):
    """Nurse dashboard: list every patient whose status is 'InPatient'.

    Requires login, consistent with the other ward views -- this was the only
    view in this module reachable without authentication.
    """
    context = {
        'inpatient' : PatientStatus.objects.filter(status='InPatient')
    }
    return render(request, 'ward/nurse_home.html', context)
def patientFinalReport(request, code, id):
    """Create (or attempt to update) the final report for a patient.

    NOTE(review): work-in-progress view -- it has no @login_required (unlike
    the rest of this module), the update branch is commented out, and the
    route params `code`/`id` are unused (the patient id comes from POST).
    Always responds with an empty body.
    """
    if request.method == 'POST':
        patient = Patient.objects.get(id=request.POST['patient'])
        prescription = request.POST['prescription']
        n = len(prescription.split())  # word count of the submitted prescription
        print(type(n))  # debug leftover
        print(n)  # debug leftover
        report = request.POST['report']
        p_report = PatientReport.objects.filter(patient=patient)
        if not p_report:
            # No report exists for this patient yet: create the first one.
            print('Yes')  # debug leftover
            PatientReport.objects.create(
                patient = patient,
                doctor_prescription = prescription,
                nurse_report = report
            )
        else:
            for patient_report in p_report:
                p = len(patient_report.doctor_prescription.split())
                print(type(p))  # debug leftover
                print(p)  # debug leftover
                # NOTE(review): len() never returns None, so `p is not None` is
                # always true; the word-count comparison is a weak change check.
                if p is not None and n == p:
                    print('Nothing was modify')
                else:
                    # NOTE(review): with the create() call commented out, this
                    # branch is a no-op -- changed reports are silently dropped.
                    print('Yes')
                    # PatientReport.objects.create(
                    #     patient = patient,
                    #     doctor_prescription = prescription,
                    #     nurse_report = report
                    # )
    return HttpResponse('')
| [
"ndayikennysmuusic@gmail.com"
] | ndayikennysmuusic@gmail.com |
183e831869771e445da8acbd31a8537370b2240a | 9fe219d1971d0e8613eaa99b7ba238bedf4258c1 | /bmf_proc.py | 13486c33a54b37ca4fd7bd1fb1be80af44908494 | [] | no_license | ShawnYi5/restore-iso | bb5fb0fdb1b5f6b200428266c7318e1ef27d6c59 | 725141f2283cc2c94c55f042b1929c845a1b8b14 | refs/heads/master | 2022-10-27T19:21:23.990688 | 2019-08-22T03:13:50 | 2019-08-22T03:13:50 | 203,700,366 | 0 | 2 | null | 2022-10-13T06:03:20 | 2019-08-22T02:35:54 | Python | UTF-8 | Python | false | false | 5,310 | py | import json
import os
import sys
import win32api
import win32file
import win32con
current_dir = os.path.split(os.path.realpath(__file__))[0]
sys.path.append(current_dir)
import xlogging
class CBmfProc(xlogging.WorkWithLogger):
    """Find the marker disk (identified by a sentinel string near the 2 MiB
    mark) and burn the collected %WINDIR%\\*.bmf files plus an ext-info sector
    onto it. Windows-only (pywin32 raw-disk I/O)."""
    def __init__(self):
        # 'bmf_proc' is the logger name; 188 is the work id expected by the
        # xlogging framework -- see xlogging.WorkWithLogger.
        xlogging.WorkWithLogger.__init__(self, r'bmf_proc', 188)
    def search_need_disk_guid(self):
        """Scan PhysicalDrive0..25 for the sentinel in the 512 bytes just
        below offset 2 MiB; return the drive index, or -1 if not found."""
        ret_num = -1
        flag_string = r'hhekaxxm9idsvW5PdutqgPthyuwuqwq6w5yjfbt9zgTbCtkvebrrknmpzspqhuC2'
        for i in range(26):
            try:
                handle = win32file.CreateFile('\\\\.\\PhysicalDrive' + str(i), win32con.GENERIC_READ,
                                              win32con.FILE_SHARE_READ | win32con.FILE_SHARE_WRITE, None,
                                              win32con.OPEN_EXISTING,
                                              win32con.FILE_ATTRIBUTE_NORMAL, 0)
                win32file.SetFilePointer(handle, 1024 * 1024 * 2 - 512, win32con.FILE_BEGIN)
                (ret, ret_str) = win32file.ReadFile(handle, 512, None)
                win32api.CloseHandle(handle)
                if ret != 0:
                    self.logger.info(
                        'win32file.CreateFile error file = {},continue search'.format('\\\\.\\PhysicalDrive' + str(i)))
                    continue
                if -1 != ret_str.find(flag_string.encode('utf-8')):
                    ret_num = i
                    # NOTE(review): logged on the *success* path; message text is misleading.
                    self.logger.info('find flag_string error file ,continue search')
                    break
            except:  # any per-drive failure (missing drive, access denied) -> try next drive
                continue
        return ret_num
    def read_bin_file_no_print_context(self, file_path):
        """Read the whole file sequentially in 8 MiB chunks without logging its
        contents -- used to make sure every block of a .bmf file has been
        touched before it is copied (see the note in work_real)."""
        try:
            max_buffer_bytes = 8 * 1024 * 1024
            with open(file_path, 'rb') as file_handle:
                while True:
                    read_bytes = len(file_handle.read(max_buffer_bytes))
                    self.logger.info("file_path = {},read len = {}".format(file_path, read_bytes))
                    if read_bytes < max_buffer_bytes or read_bytes == 0:
                        break
        except Exception as e:
            self.logger.error(r'read_bin_file_no_print_context {} failed. {}'.format(file_path, e), exc_info=True)
    def get_windows_version(self):
        """Return (major, minor) of the running Windows version."""
        ver_info = win32api.GetVersionEx()
        self.logger.info('ver_info = {}'.format(ver_info))
        return ver_info[0], ver_info[1]
    def write_ext_info(self, disk_handle):
        """Write a JSON blob carrying the Windows version, zero-padded to one
        512-byte sector, at offset 2 MiB on the target disk."""
        windows_major_version, windows_minor_version = self.get_windows_version()
        ext_info = {'windows_version': {'major': windows_major_version, 'minor': windows_minor_version}}
        ext_info_data = json.dumps(ext_info).encode().ljust(512, b'\0')
        win32file.SetFilePointer(disk_handle, 1024 * 1024 * 2, win32con.FILE_BEGIN)
        win32file.WriteFile(disk_handle, ext_info_data, None)
    def work_real(self):
        """Main work: locate the marker disk, then write the first 4 KiB of
        every %WINDIR%\\*.bmf file (name-sorted), back to back after an initial
        4 KiB gap, and finish with the ext-info sector at 2 MiB."""
        disk_num = self.search_need_disk_guid()
        if -1 == disk_num:
            raise Exception('bmf can not find disk guid')
        windows_dir = win32api.GetWindowsDirectory()
        self.logger.info(windows_dir)
        windows_list = os.listdir(windows_dir)
        self.logger.info(windows_list)
        bmf_list = []
        for i in windows_list:
            if i.endswith('.bmf'):
                bmf_list.append(os.path.join(windows_dir, i))
        self.logger.info(bmf_list)
        # Sort so the on-disk order of bmf blocks is deterministic.
        bmf_list.sort()
        self.logger.info(bmf_list)
        disk_handle = win32file.CreateFile('\\\\.\\PhysicalDrive' + str(disk_num), win32con.GENERIC_WRITE,
                                           win32con.FILE_SHARE_READ | win32con.FILE_SHARE_WRITE, None,
                                           win32con.OPEN_EXISTING,
                                           win32con.FILE_ATTRIBUTE_NORMAL, 0)
        self.logger.info('floppy_handle = {} {}'.format(disk_handle, disk_num))
        win32file.SetFilePointer(disk_handle, 4 * 1024, win32file.FILE_BEGIN)
        self.logger.info('skip 4k')
        for i in bmf_list:
            # The bmf file must be read in full first; otherwise, when it spans
            # a 64 KiB block that has never been read, that block would get
            # restored (rolled back) out from under us.
            self.read_bin_file_no_print_context(i)
            handle = win32file.CreateFile(i, win32con.GENERIC_READ,
                                          win32con.FILE_SHARE_READ | win32con.FILE_SHARE_WRITE,
                                          None, win32con.OPEN_EXISTING, win32con.FILE_ATTRIBUTE_NORMAL, 0)
            self.logger.info('bmf name = {},file handle = {}'.format(i, handle))
            (ret, ret_str) = win32file.ReadFile(handle, 4 * 1024, None)
            if ret != 0 or len(ret_str) != 4 * 1024:
                # Skip files shorter than 4 KiB or unreadable ones.
                self.logger.info('ReadFile error,file = {} len = {}'.format(i, len(ret_str)))
                win32api.CloseHandle(handle)
                continue
            self.logger.info(ret_str)
            ret, _ = win32file.WriteFile(disk_handle, ret_str, None)
            if ret != 0:
                raise Exception('bmf WriteFile err ret = {}'.format(ret))
            else:
                self.logger.info('WriteFile success : {}'.format(i))
            win32api.CloseHandle(handle)
        self.write_ext_info(disk_handle)
        win32api.CloseHandle(disk_handle)
if __name__ == "__main__":
    # work() is provided by the xlogging.WorkWithLogger base class and is
    # expected to call work_real() with logging/error handling around it.
    cbmf_proc = CBmfProc()
    cbmf_proc.work()
| [
"yi.shihong@aliyun.com"
] | yi.shihong@aliyun.com |
46bc3df195b3fe48b2c3449145f923d6b8818e5d | 8668830f34ce260565217ea3b49e090778780b44 | /coupon/factories/coupon_factory.py | d014517eb75905fd048a06d8f80fd5d7f104676e | [] | no_license | wcirillo/ten | 72baf94da958b2ee6f34940c1fc3116660436762 | a780ccdc3350d4b5c7990c65d1af8d71060c62cc | refs/heads/master | 2016-09-06T13:39:03.966370 | 2015-07-02T12:37:36 | 2015-07-02T12:37:36 | 15,700,975 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,446 | py | """ Coupon Factory used to help create quick Coupon instances for tests. """
from advertiser.factories.location_factory import COUPON_LOCATION_FACTORY
from coupon.factories.offer_factory import OFFER_FACTORY
from coupon.models import Coupon
class CouponFactory(object):
    """ Coupon Factory Class: builds Coupon fixtures (plus their offer,
    business, advertiser and location associations) for tests. """
    @staticmethod
    def _create(offer, coupon_type_id=3):
        """ Create a single coupon instance. coupon_type_id defaults to 3
        (the factory's standard paid type -- confirm against CouponType data). """
        coupon = Coupon.objects.create(offer=offer,
            coupon_type_id=coupon_type_id)
        return coupon
    def create_coupon(self, offer=None, **kwargs):
        """ Create a ONE basic coupon instance with an offer, business and
        advertiser association. Extra kwargs are forwarded to the offer
        factory; 'create_location' (default True) controls whether a
        coupon location is attached. """
        create_location = kwargs.get('create_location', True)
        if not offer:
            offer = OFFER_FACTORY.create_offer(**kwargs)
        coupon = self._create(offer=offer)
        if create_location:
            COUPON_LOCATION_FACTORY.create_coupon_location(coupon)
        return coupon
    def create_coupons(self, offer=None, create_offer=False, create_count=1):
        """ This method will do 1 of 3 things.
        default.) offer == None
                  create_offer == False
            Create 1 or more offers and associate them with
            different offers -> businesses -> advertisers.
                Ex: coupon -> offer -> business -> advertiser
                    coupon1 -> offer1 -> business1 -> advertiser1
                    coupon2 -> offer2 -> business2 -> advertiser2
                    coupon3 -> offer3 -> business3 -> advertiser3
        2.) offer == None
            create_offer == True
            Create an offer -> business -> advertiser.
            Then create coupons for that offer.
                Ex: coupon -> offer -> business -> advertiser
                    coupon1 -> offer -> business -> advertiser
                    coupon2 -> offer -> business -> advertiser
        3.) offer != None
            create_offer == False
            If an offer is passed in use that offer.
            Create 1 or more coupons and associate them with the
            same offer -> business -> advertiser.
                Ex: coupon -> offer -> business -> advertiser
                    coupon1 -> offer -> business -> advertiser
                    coupon2 -> offer -> business -> advertiser
        """
        coupon_list = []
        current_create_count = 0
        # create_many_offers stays True only in the default case (no offer
        # supplied and create_offer False): a fresh offer per coupon.
        create_many_offers = True
        if create_offer:
            offer = OFFER_FACTORY.create_offer()
            create_many_offers = False
        else:
            if offer:
                create_many_offers = False
        while current_create_count < create_count:
            if create_many_offers:
                offer = OFFER_FACTORY.create_offer()
            coupon = self._create(offer=offer)
            COUPON_LOCATION_FACTORY.create_coupon_location(coupon)
            current_create_count += 1
            coupon_list.append(coupon)
        return coupon_list
    def create_coupon_many_locations(self, offer=None, create_all=True,
            business_location_count=1, coupon_location_count=1):
        """ Create a coupon with multiple locations associated with it.
        ARG Definitions:
            create_all == True will ensure that every business_location will get
                associated with this coupon.
            business_location_count == the number of locations that the respective
                business of this coupon will have in total.
            coupon_location_count == The number of business_locations that will be
                associated with this coupon.
        """
        coupon = self.create_coupon(offer=offer)
        # NOTE(review): leftover loop scaffolding; the location factory now
        # handles the counts in a single call.
        #current_create_count = 1
        #while(current_create_count < business_location_count):
        COUPON_LOCATION_FACTORY.create_coupon_locations(coupon,
            create_all=create_all,
            business_location_count=business_location_count,
            coupon_location_count=coupon_location_count)
        #    current_create_count += 1
        return coupon
    def create_coupons_many_locations(self, offer=None, create_all=True,
            create_count=1, **kwargs):
        """ Create multiple coupons with multiple locations associated with each
        one.
        ARG Definitions:
            create_all == True will ensure that every business_location will get
                associated with this coupon.
            business_location_count == the number of locations that the respective
                business of this coupon will have in total.
            coupon_location_count == The number of business_locations that will be
                associated with this coupon.
        """
        coupon_list = self.create_coupons(offer=offer,
            create_count=create_count)
        for coupon in coupon_list:
            COUPON_LOCATION_FACTORY.create_coupon_locations(coupon,
                create_all=create_all,
                business_location_count=kwargs.get('business_location_count', 1),
                coupon_location_count=kwargs.get('coupon_location_count', 1))
        return coupon_list
    @staticmethod
    def normalize_coupon_locations(coupon):
        """ Normalize locations of this coupon to NY state. """
        locations = coupon.location.all()
        for location in locations:
            location.location_state_province = 'NY'
            location.save()
# Shared singleton used by the test suite.
COUPON_FACTORY = CouponFactory()
| [
"williamcirillo@gmail.com"
] | williamcirillo@gmail.com |
8563336093cb3a3bb777b14a25ef9b23beb1ffcf | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/116/usersdata/189/26740/submittedfiles/al1.py | 6014af9ec62b958ab7597cc940108dd8387f02ba | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | # -*- coding: utf-8 -
# Read the cylinder's radius and height from the user.
r = float(input("digite o raio:"))
a = float(input("digite a altura:"))
# Volume of a cylinder: pi * r^2 * h (pi approximated as 3.14159, as before).
v = 3.14159 * (r ** 2) * a
Print("o volume é %.2v" %v) | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
97e0c17a95a962957c152e80ff839d72bcad99b9 | a1b3b1f6719bd00acabf0e411ad55058447d2103 | /commis/clients/search_indexes.py | 0cb58ccc9fc9866485ab65b831c2b5ab51dacc56 | [] | no_license | ageron/commis | 8794005fe40e0aa0288fae53a41731d784a1edd3 | a015c734e1d9bb98bf576d1bc2529eda75ac3711 | refs/heads/master | 2021-01-18T07:39:09.064166 | 2013-04-20T21:14:37 | 2013-04-20T21:14:37 | 9,499,256 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | from haystack import site
from commis.clients.models import Client
from commis.search.indexes import CommisSearchIndex
class ClientIndex(CommisSearchIndex):
    # Inherits the shared CommisSearchIndex fields unchanged; exists only so
    # Client objects get their own haystack registration below.
    pass
site.register(Client, ClientIndex)
| [
"noah@coderanger.net"
] | noah@coderanger.net |
fc7cf9499d22e579fec326ad1c1bb0dc17dd69c8 | 018a1d8d59c00f69b0489ce05567a2972c335ff7 | /2017_May23/generators/use_logger.py | 61d24cc930c5596fd8704acd8a2e45b02303a186 | [] | no_license | singhujjwal/python | f0127b604e2204a02836c95d89ee4903f760d48c | 4fb4b34a318f093bd944cd70d7f0d69dd7dfef6e | refs/heads/master | 2021-09-20T15:35:13.389400 | 2021-09-03T06:39:58 | 2021-09-03T06:39:58 | 92,157,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | py | from logger import log
testlog = log("test.log", "test program")
for l in testlog:
testlog.send("this is a test message...")
print "sent one log message..."
testlog.send("this is another log message..")
print "Sent another log message..."
testlog.send(None)
| [
"ujjsingh@cisco.com"
] | ujjsingh@cisco.com |
28d2ccf22274d749ca637405de3be579954f8792 | 3e24611b7315b5ad588b2128570f1341b9c968e8 | /estscan.py | 2f748d2268d7e8c36e555456b414934f5a11b82f | [
"BSD-2-Clause"
] | permissive | bioCKO/lpp_Script | dc327be88c7d12243e25557f7da68d963917aa90 | 0cb2eedb48d4afa25abc2ed7231eb1fdd9baecc2 | refs/heads/master | 2022-02-27T12:35:05.979231 | 2019-08-27T05:56:33 | 2019-08-27T05:56:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,238 | py | #!/usr/bin/env python
#coding:utf-8
"""
Author: --<>
Purpose:
Created: 2014/9/15
"""
import urllib,urllib2,re, HTMLParser,time
from lpp import *
# Input FASTA stream; fasta_check (from lpp) presumably yields (title, sequence)
# pairs -- see the `for t,s in RAW` loop below. NOTE(review): 'rU' mode and the
# never-closed handle are Python-2-era idioms.
RAW = fasta_check(open(sys.argv[1],'rU'))
def get_data(data,result):
    """POST a batch of FASTA records to the myhits ESTScan web service and
    append the detected sequence names to the `result` file.

    Parameters:
        data   -- concatenated FASTA text (headers + sequences) to scan
        result -- path of the output file (opened in append mode)
    """
    url = "http://myhits.isb-sib.ch/cgi-bin/estscan"
    values = {
        "species":"Drosophila_melanogaster.smat",
        "text":data,
        "action":"ESTScan",
        "indelpenalty":-50
    }
    payload = urllib.urlencode(values)
    req = urllib2.Request(url, payload)
    response = urllib2.urlopen(req)
    Data = response.read()
    # Sequence names are embedded in the radio-button <input> tags of the page.
    all_need = re.findall("""<td class='desc_summary' valign='top'><input type='radio' name='seqname' value='([^\']+)'""",Data,re.MULTILINE)
    html_parse = HTMLParser.HTMLParser()
    # Decode HTML entities, then strip any remaining markup from each name.
    all_need = map(lambda x:html_parse.unescape(x), all_need)
    all_need = map(lambda x:re.sub('<[^>]+>','',x),all_need)
    # Fix: close the output file deterministically instead of leaking the
    # handle (the original opened in append mode and never closed it).
    with open(result,'a') as END:
        END.write('\n'.join(all_need) +'\n')
cache = ""
end = sys.argv[2]
for t,s in RAW:
num+=1
cache+=t+s
if num%50==0:
get_data(cache, end)
cache = ""
#time.sleep(1)
else:
get_data(cache, end)
| [
"409511038@qq.com"
] | 409511038@qq.com |
9d61a90c8b910a3ff26bc2b1c5d5961defe17a67 | e964a22925a510801ad6395ea087115aa4c86a2e | /trunk/zopen/frs/core/utils.py | 0fa6cbfc785fcdf6964dd61846bd1b20639af69f | [] | no_license | BGCX261/zopen-frs-svn-to-git | 7378590e975ecee9668ccbe884ecb0b4912bf963 | c3725bb99e5ceb219a54e42a2bbfcc4613c8833c | refs/heads/master | 2021-01-21T02:36:30.851387 | 2015-08-25T15:50:59 | 2015-08-25T15:50:59 | 41,602,270 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,598 | py | import time
import os
import shutil
import socket
from random import random
from md5 import md5
from config import FS_CHARSET
from types import UnicodeType
def timetag(the_time=None):
    """Render a timestamp as 'YYYY-MM-DD-hh-mm-ss'.

    With no argument the current GMT time is used; otherwise `the_time` must
    expose a strftime() method (e.g. datetime/date objects).
    """
    fmt = '%Y-%m-%d-%H-%M-%S'
    if the_time is not None:
        return the_time.strftime(fmt)
    # Default: format the current GMT time (struct_time) via time.strftime.
    return time.strftime(fmt, time.gmtime())
def ucopy2(ossrc, osdst):
    """shutil.copy2 wrapper: on filesystems without unicode filename support,
    encode both paths to FS_CHARSET byte strings first (Python 2 era).

    NOTE(review): only osdst's type is checked, but both paths are encoded --
    assumes src and dst are of the same string type.
    """
    # known limitation: ucopy2 doesn't work with unicode filenames yet
    if type(osdst) is UnicodeType and \
       not os.path.supports_unicode_filenames:
        ossrc = ossrc.encode(FS_CHARSET)
        osdst = osdst.encode(FS_CHARSET)
    shutil.copy2(ossrc, osdst)
def ucopytree(ossrc, osdst, symlinks=False):
    """shutil.copytree wrapper with the same unicode-path workaround as
    ucopy2; `symlinks` is forwarded to copytree."""
    # known limitation: doesn't work with unicode filenames yet (see ucopy2)
    if type(osdst) is UnicodeType and \
       not os.path.supports_unicode_filenames:
        ossrc = ossrc.encode(FS_CHARSET)
        osdst = osdst.encode(FS_CHARSET)
    shutil.copytree(ossrc, osdst, symlinks)
def umove(ossrc, osdst):
    """shutil.move wrapper with the same unicode-path workaround as ucopy2."""
    # known limitation: umove doesn't work with unicode filenames yet (see ucopy2)
    if type(osdst) is UnicodeType and \
       not os.path.supports_unicode_filenames:
        ossrc = ossrc.encode(FS_CHARSET)
        osdst = osdst.encode(FS_CHARSET)
    shutil.move(ossrc, osdst)
# Best-effort per-machine identifier used to salt make_uuid(); falls back to a
# random number when the hostname cannot be resolved.
try:
    _v_network = str(socket.gethostbyname(socket.gethostname()))
except:
    _v_network = str(random() * 100000000000000000L)
def make_uuid(*args):
    """Build a 32-char hex id from the current time, a random number, the host
    address and the caller-supplied args.

    NOTE(review): md5-based and time/random-seeded -- unique enough for object
    ids, but not cryptographically strong; do not use for security tokens.
    """
    t = str(time.time() * 1000L)
    r = str(random()*100000000000000000L)
    data = t +' '+ r +' '+ _v_network +' '+ str(args)
    uid = md5(data).hexdigest()
    return uid
| [
"you@example.com"
] | you@example.com |
3614b185bb6e6f16c39bfc81b77b9c9817f4f4cc | 50a690ab7db8fe98a620f3c54aabd90c3ff3e7f3 | /utils/nms_processor.py | 8124388141a5df4f982118d5c8c9d8c353cd05ff | [] | no_license | yekeren/ADVISE-Image_ads_understanding | 590754909d2f4259a57d32591a15bea845586a0f | 2ea5e1405b1ab178b95f9c2cd9158b16847ac6a3 | refs/heads/master | 2021-10-02T08:01:29.193553 | 2018-11-29T16:32:25 | 2018-11-29T16:32:25 | 103,291,233 | 22 | 8 | null | null | null | null | UTF-8 | Python | false | false | 1,362 | py | import numpy as np
import tensorflow as tf
class NMSProcessor(object):
  """Helper class that process non maximum suppression on single image."""
  def __init__(self, max_output_size, iou_threshold):
    """Init.

    Builds a small TF1 graph (placeholders + non_max_suppression) once and a
    soft-placement session capped at 50% of GPU memory, reused by process().

    Args:
      max_output_size: maximum number of boxes to maintain.
      iou_threshold: threshold for intersection over union.
    """
    config = tf.ConfigProto()
    config.allow_soft_placement = True
    config.gpu_options.allow_growth = True
    config.gpu_options.per_process_gpu_memory_fraction = 0.5
    self._sess = tf.Session(config=config)
    self._boxes = tf.placeholder(dtype=tf.float32, shape=[None, 4])
    self._scores= tf.placeholder(dtype=tf.float32, shape=[None])
    self._selected = tf.image.non_max_suppression(
        self._boxes, self._scores, max_output_size, iou_threshold)
    self._selected_boxes = tf.gather(self._boxes, self._selected)
    self._selected_scores = tf.gather(self._scores, self._selected)
  def process(self, boxes, scores):
    """Process non maximum suppression.

    Args:
      boxes: a [num_boxes, 4] np array.
      scores: a [num_boxes] np array.

    Returns:
      A 3-tuple (selected_indices, selected_boxes, selected_scores):
        selected_indices: [num_selected] indices into `boxes` kept by NMS.
        selected_boxes: a [num_selected, 4] np array.
        selected_scores: a [num_selected] np array of their scores.
    """
    return self._sess.run([
        self._selected, self._selected_boxes, self._selected_scores],
        feed_dict={self._boxes: boxes, self._scores: scores})
| [
"yekeren.cn@gmail.com"
] | yekeren.cn@gmail.com |
6556a57d0ecd1abdbfaa6ae76e58c383a5bacafe | a7d1030cb797b862b87ee3e8b8a206814d26eee2 | /videoburnsubtitles | fa680dc836bf1c6fdd587e2e7baf42a4026708b0 | [] | no_license | lmanul/sak | 8bdf98d2e463f3e171aa79b82557cd4d6ade2724 | 37604f1d0dc61373bd24d73d742afe9c754e62a3 | refs/heads/master | 2023-08-30T07:51:04.727676 | 2023-08-27T06:09:46 | 2023-08-27T06:09:46 | 144,207,029 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 361 | #!/usr/bin/python3
import os
import sys
if __name__ == "__main__":
movie = sys.argv[1]
subs = sys.argv[2]
dot_index = movie.rfind(".")
out = movie[:dot_index] + "_burned" + movie[dot_index:]
cmd = ("ffmpeg "
"-i " + movie + " "
"-vf subtitles=" + subs + " "
"" + out)
# print(cmd)
os.system(cmd)
| [
"m@ma.nu"
] | m@ma.nu | |
7ddfb5530fce7faf50eb533dbce56928fbd1c9a8 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /cv5vRuexCzi4hvxdd_23.py | 130aa8cd3bbc2ef6f359e7cfdcaa101194cf122b | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 488 | py |
# Album titles released in each year, keyed by year.
album_dict = {
    2015: ("Vulnicura", "Honeymoon", "Rebel Heart"),
    2016: ("Lemonade", "Blackstar", "A Moon Shaped Pool"),
    2017: ("Flower Boy", "Antisocialites"),
    2018: ("El Mal Querer", "Someone Out There", "Cranberry", "Kamikaze"),
    2019: ("thank u next", "Magdalene", "Ode to Joy"),
    2020: ("Rough and Rowdy Ways", "folklore", "Future Nostalgia", "Colores")
}

def release_year(album):
    """Return the year whose album tuple contains `album`, or "Unknown"."""
    for year, albums in album_dict.items():
        if album in albums:
            return year
    return "Unknown"
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
78af297cc1a99dc9da70c9593add3529febc2163 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-codeartsdeploy/huaweicloudsdkcodeartsdeploy/v2/model/delete_deployment_group_request.py | 19ce924355b8177a16f4bd1910979b60892d4fa0 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 3,106 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class DeleteDeploymentGroupRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'group_id': 'str'
}
attribute_map = {
'group_id': 'group_id'
}
def __init__(self, group_id=None):
"""DeleteDeploymentGroupRequest
The model defined in huaweicloud sdk
:param group_id: 主机集群id
:type group_id: str
"""
self._group_id = None
self.discriminator = None
self.group_id = group_id
@property
def group_id(self):
"""Gets the group_id of this DeleteDeploymentGroupRequest.
主机集群id
:return: The group_id of this DeleteDeploymentGroupRequest.
:rtype: str
"""
return self._group_id
@group_id.setter
def group_id(self, group_id):
"""Sets the group_id of this DeleteDeploymentGroupRequest.
主机集群id
:param group_id: The group_id of this DeleteDeploymentGroupRequest.
:type group_id: str
"""
self._group_id = group_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DeleteDeploymentGroupRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
1530cf27ee08c2d4ffb6f4f98eb94712de8edc18 | f1e3817c280557878c682bb92e1900d61de2d65d | /web_transbank_1/BACKEND/project/commerce/migrations/0002_categoria_imagen_producto.py | 5408c0dc5f9a58966048aa6eee9c2937d98dc397 | [] | no_license | netluxspa/web-transbank | f2f8099fef22883717ca76fc3aaaa7e3e52f91d4 | bf9b743863d45103d48c19245832627b04b07108 | refs/heads/master | 2023-06-26T21:52:15.113911 | 2021-07-26T21:20:21 | 2021-07-26T21:20:21 | 363,288,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,630 | py | # Generated by Django 3.2 on 2021-04-26 23:06
import commerce.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('commerce', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Categoria',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('titulo', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='Imagen',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('imagen', models.ImageField(upload_to=commerce.models.scramble_uploaded_filename)),
('descripcion', models.CharField(blank=True, max_length=50, null=True)),
('prioridad', models.IntegerField(blank=True, null=True)),
],
options={
'ordering': ('prioridad',),
},
),
migrations.CreateModel(
name='Producto',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('titulo', models.CharField(max_length=50)),
('categoria', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='commerce.categoria')),
('imagenes', models.ManyToManyField(blank=True, to='commerce.Imagen')),
],
),
]
| [
"netluxspa@gmail.com"
] | netluxspa@gmail.com |
eeccb59ddb3e905bd4d10a2eb35ada77ac381c05 | d9eef8dd3489682c8db41f2311e3058d1f369780 | /.history/abel-network-files/mcmc_alg_implementation_own_two_20180630104728.py | 576fd8873c53eb04f34d5536bd010e45f8a24b5c | [] | no_license | McKenzie-Lamb/Gerrymandering | 93fe4a49fe39a0b307ed341e46ba8620ea1225be | b7a7c4129d6b0fcd760ba8952de51eafa701eac3 | refs/heads/master | 2021-01-25T06:06:43.824339 | 2018-10-16T14:27:01 | 2018-10-16T14:27:01 | 93,526,515 | 0 | 0 | null | 2018-07-12T19:07:35 | 2017-06-06T14:17:47 | Python | UTF-8 | Python | false | false | 4,187 | py | # Author: Abel Gonzalez
# Date: 06/26/18
#
# Description:
# This program uses the .shp file to create a network graph where each node
# represents a census tract and the edge represents adjacency between each
# tract, usign graph-tool instead of networkx
import random
import numpy as np
import graph_tool.all as gt
from pathlib import Path
def create_graph_views(district_total_no):
graph_views = list()
for i in range(district_total_no):
main_graph_view = gt.GraphView(graph)
graph_view_check = main_graph_view.new_vertex_property("bool")
matched_vertices = gt.find_vertex(graph, district_no, i)
for j in matched_vertices:
graph_view_check[j] = True
graph_view = gt.GraphView(main_graph_view, vfilt=graph_view_check)
graph_views.append(graph_view)
return graph_views
def turn_off_edges(districts_graphs):
turned_off_graphs = list()
# Iterate through districts and selects random edges
for district in range(len(districts_graphs)):
to_delete = districts_graphs[district].new_edge_property('bool')
edges = districts_graphs[district].get_edges()
selected = edges[np.random.randint(edges.shape[0], size = len(edges)//3.5), :]
for i in selected:
to_delete[i] = True
turned_off_graphs.append(gt.GraphView(districts_graphs[district], efilt=to_delete))
return turned_off_graphs
def get_cp_boundaries(graph, turned_on_graphs):
cp_boundary = list()
for g in range(len(turned_on_graphs)):
cp_label, hist = gt.label_components(turned_on_graphs[g])
labels = set(cp_label.a)
for l in labels:
cp = gt.find_vertex(turned_on_graphs[g], cp_label, l)
label_boun = 0
for v in cp:
vertex_bound = False
for n in graph.vertex(v).all_neighbors():
for g_two in range(len(turned_on_graphs)):
if g == g_two:
continue
try:
turned_on_graphs[g_two].vertex(n)
except ValueError:
continue
else:
vertex_bound = True
break
if vertex_bound == True:
label_boun += 1
break
if label_boun == len(cp):
cp_boundary.append(cp)
return cp_boundary
# Paths
main_folder = Path("abel-network-files/")
data_folder = Path("abel-network-files/data/")
images_folder = Path("abel-network-files/images/")
# Loading the previous created Graph and creating the prop maps
graph = gt.load_graph(str(data_folder / "tmp_graph.gt"))
color = graph.new_vertex_property("vector<double>")
ring_color = graph.new_vertex_property("vector<double>")
cp_label = graph.new_vertex_property("int")
# Init variables
district_total_no = 2
gt.graph_draw(graph, pos=graph.vp.pos,
output=str(main_folder / ('tmp.png')),
bg_color=(255, 255, 255, 1), vertex_text=graph.vertex_index)
# Separates graph into blocks
districts = gt.minimize_blockmodel_dl(graph, district_total_no, district_total_no)
district_no = districts.get_blocks()
districts.draw(output='tmp.png', vertex_text=graph.vertex_index)
# Create the different graphs
districts_graphs = create_graph_views(district_total_no)
for i in range(len(districts_graphs)):
gt.graph_draw(
districts_graphs[i], pos=graph.vp.pos,
output=str(main_folder / ('tmp'+str(i)+'.png')),
bg_color=(255, 255, 255, 1))
turned_on_graphs = turn_off_edges(districts_graphs)
for i in range(len(districts_graphs)):
gt.graph_draw(
turned_on_graphs[i], pos=graph.vp.pos,bg_color=(255,255,255,1),vertex_size=2,
output=str(main_folder / ('tmp1'+str(i)+'.png')), vertex_text=graph.vertex_index)
labels_in_boundaries = get_cp_boundaries(graph, turned_on_graphs)
print(len(labels_in_boundaries))
slected_vertices = random.choice(labels_in_boundaries, k = 3)
print(len(slected_vertices)) | [
"gonzaleza@ripon.edu"
] | gonzaleza@ripon.edu |
23697655f0b003d48049aee403c5081f30a2e48b | 1109d81ac29335d7063557ee6d5bd2d9bda7a8d4 | /chap06_Regression/exams/linear_regression_exam.py | 855fbc53cfef92d1c318f3939acd731f1ea3346c | [] | no_license | yangmyongho/4_Python-II | 68c3b6400cda2c614d40d96166ff42c92fee29e0 | 0f7e488e034a1dac0438ad5c16ed435d30498d47 | refs/heads/master | 2022-11-28T00:36:45.243930 | 2020-07-29T14:08:45 | 2020-07-29T14:08:45 | 283,514,268 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,782 | py | # -*- coding: utf-8 -*-
"""
문) california 주택가격을 대상으로 다음과 같은 단계별로 선형회귀분석을 수행하시오.
"""
# california 주택가격 데이터셋
'''
캘리포니아 주택 가격 데이터(회귀 분석용 예제 데이터)
•타겟 변수
1990년 캘리포니아의 각 행정 구역 내 주택 가격의 중앙값
•특징 변수(8)
MedInc : 행정 구역 내 소득의 중앙값
HouseAge : 행정 구역 내 주택 연식의 중앙값
AveRooms : 평균 방 갯수
AveBedrms : 평균 침실 갯수
Population : 행정 구역 내 인구 수
AveOccup : 평균 자가 비율
Latitude : 해당 행정 구역의 위도
Longitude : 해당 행정 구역의 경도
'''
from sklearn.datasets import fetch_california_housing # dataset load
import pandas as pd # DataFrame 생성
from sklearn.linear_model import LinearRegression # model
from sklearn.model_selection import train_test_split # dataset split
from sklearn.metrics import mean_squared_error, r2_score # model 평가
import matplotlib.pyplot as plt
import numpy as np
# 캘리포니아 주택 가격 dataset load
california = fetch_california_housing()
print(california.DESCR)
# 단계1 : 특징변수와 타켓변수(MEDV)를 이용하여 DataFrame 생성하기
california.feature_names # 타겟명들
DF = pd.DataFrame(california.data, columns=california.feature_names )
DF
tg = pd.Series(california.target) # 집단변수
DF['MEDV'] = tg
DF
# 단계2 : 타켓변수와 가장 상관관계가 높은 특징변수 확인하기
cor = DF.corr()
cor.loc['MEDV']
'''
MedInc 0.688075
HouseAge 0.105623
AveRooms 0.151948
AveBedrms -0.046701
Population -0.024650
AveOccup -0.023737
Latitude -0.144160
Longitude -0.045967
MEDV 1.000000
''' # MedInc : 0.688075
# 단계3 : california 데이터셋을 대상으로 1만개 샘플링하여 서브셋 생성하기
'''
idx = np.random.choice(a=len(DF), size=10000)
idx
cal_data = DF.iloc[idx,:]
cal_data.shape # (10000, 9)
'''
cal_data = DF.sample(10000, random_state=123)
# 단계4 : 75%(train) vs 25(test) 비율 데이터셋 split
train, test = train_test_split(cal_data, random_state=123)
train.shape # (7500, 9)
test.shape #(2500, 9)
# 단계5 : 선형회귀모델 생성
obj = LinearRegression()
model = obj.fit(train.iloc[:, :8], train.loc[:,'MEDV'])
model
# 단계6 : 모델 검정(evaluation) : 예측력 검정, 과적합(overfitting) 확인
train_acc = model.score(train.iloc[:, :8], train.iloc[:, 8])
train_acc # 0.605786545196659
test_acc = model.score(test.iloc[:, :8], test.iloc[:, 8])
test_acc # 0.5885575812843817
# 해설 : 훈련셋과 검정셋 모두 비슷한 분류정확도가 나온다. -> 과적합 없다
# 단계7 : 모델 평가(test)
# 조건1) 단계3의 서브셋 대상으로 30% 샘플링 자료 이용
# 조건2) 평가방법 : MSE, r2_score
# df.sample()
subset = cal_data.sample(3000, random_state=123)
subset.shape # (3000, 9)
x_train, x_test = train_test_split(subset)
X = x_train.iloc[:, :8]
Y = x_train.iloc[:, 8]
X1 = x_test.iloc[:, :8]
Y1 = x_test.iloc[:, 8]
model2 = obj.fit(X, Y)
model2
y_pred = model.predict(X1)
y_true = Y1
MSE = mean_squared_error(y_true, y_pred)
MSE # 0.5526871790284673
score = r2_score(y_true, y_pred)
score # 0.5782616441222735
type(y_true)
type(y_pred)
y_true = np.array(y_true)
# 단계8 : 예측치 100개 vs 정답 100개 비교 시각화
plt.plot(y_true[:100], color='b', label='real values')
plt.plot(y_pred[:100], color='r', label='fitted values')
plt.xlabel('index')
plt.ylabel('fitted values')
plt.legend(loc = 'best')
plt.show()
| [
"noreply@github.com"
] | yangmyongho.noreply@github.com |
1de4f9c41e3447d40f2cac71a2bf89f5d3c2737d | 44cb69a5ea67e60289e33b3228b79d3c8fd36661 | /core/migrations/0093_historique_cnl_fiches.py | 5a81eb63bb8a302037b17b5b5d4185969dadb70a | [] | no_license | zedkaria-bel/ah_project | bc6d79acf3f419f9fdc45189d6b653ae84709e42 | 9dae7435ca6670006525eeda881fcea64c0557d1 | refs/heads/master | 2023-06-24T21:17:27.286727 | 2021-07-28T16:15:48 | 2021-07-28T16:15:48 | 381,800,444 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 960 | py | # Generated by Django 3.2 on 2021-06-13 12:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0092_historique_affect_quota'),
]
operations = [
migrations.CreateModel(
name='HISTORIQUE_CNL_FICHES',
fields=[
('date_last_cnl', models.DateTimeField(auto_now=True, db_column='DATE LAST CNL', primary_key=True, serialize=False)),
('fiches_from', models.PositiveIntegerField(blank=True, db_column='FROM', null=True)),
('fiches_until', models.PositiveIntegerField(blank=True, db_column='UNTIL', null=True)),
('cause', models.TextField(blank=True, db_column='CAUSE CNL', null=True)),
('obs', models.TextField(blank=True, db_column='OBS', null=True)),
],
options={
'db_table': 'HISTORIQUE_CNL_FICHES',
},
),
]
| [
"zaki.198@outlook.fr"
] | zaki.198@outlook.fr |
fe4fc0a2431cbdd855e0f89420f67a080e64d4f2 | 5b37d86af518b90cb848233c7f5f53befc15a5ed | /x_vectors/models/LDE.py | f4848fdfea56cd1b6a54cf8b55ef6c3bdae176a3 | [
"MIT"
] | permissive | taalua/x-vector-pytorch | 45fce3606eeb0b9a996179a1e0242d62e8393bcd | 7d86f78a1a70974df490ef7d2629de2d71dd1558 | refs/heads/master | 2023-07-21T04:47:45.596582 | 2021-08-25T17:58:58 | 2021-08-25T17:58:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,154 | py | import torch
from torch import nn
import torch.nn.functional as F
class LDE(nn.Module):
    """Learnable Dictionary Encoding (LDE) pooling layer.

    Pools a variable-length (batch, time, feat) sequence into a fixed-size
    embedding by softly assigning each frame's residuals to D learnable
    dictionary components (Eq. 4/5 of the LDE paper).
    """

    def __init__(self, D, input_dim, with_bias=False, distance_type='norm', network_type='att', pooling='mean'):
        super(LDE, self).__init__()
        # Dictionary components, one row per cluster center: (D, input_dim).
        self.dic = nn.Parameter(torch.randn(D, input_dim))
        nn.init.uniform_(self.dic.data, -1, 1)
        # Non-negative assigning weight per component (Eq. 4 of the LDE paper).
        self.wei = nn.Parameter(torch.ones(D))
        # Optional learnable per-component bias; plain scalar 0 otherwise.
        self.bias = nn.Parameter(torch.zeros(D)) if with_bias else 0
        assert distance_type in ('norm', 'sqr')
        assert network_type in ('att', 'lde')
        assert pooling in ('mean', 'mean+std')
        self._distance_type = distance_type
        self._network_type = network_type
        self.pool = pooling

    def _distance(self, residual):
        """Reduce the feature dim: distance of each residual to its component."""
        if self._distance_type == 'norm':
            return torch.norm(residual, p=2, dim=-1)
        return torch.sum(residual ** 2, dim=-1)

    def _assign(self, residual):
        """Soft-assignment weights for each (frame, component) pair."""
        if self._network_type == 'att':
            # Attention-style: softmax across the time axis (dim=-2).
            return F.softmax(-self._distance(residual) * self.wei + self.bias, dim=-2)
        # LDE-style: squared weights, softmax across components (dim=-1).
        return F.softmax(-self._distance(residual) * (self.wei ** 2) + self.bias, dim=-1)

    def forward(self, x):
        """Encode a (B, T, F) batch into (B, D*F) or (B, 2*D*F) embeddings."""
        batch, steps, feat = x.size(0), x.size(1), x.size(2)
        # Residual of every frame against every dictionary component: (B, T, D, F).
        residual = x.view(batch, steps, 1, feat) - self.dic
        # Assignment weights, broadcastable over the feature dim: (B, T, D, 1).
        weight = self._assign(residual).unsqueeze(-1)
        # Normalize over time so the weights per component sum to 1 (Eq. 5).
        weight = weight / (torch.sum(weight, dim=1, keepdim=True) + 1e-9)
        mean = torch.sum(weight * residual, dim=1)  # (B, D, F)
        if self.pool == 'mean':
            pooled = mean
        else:
            # Weighted std vector alongside the mean ('mean+std' pooling).
            std = torch.sqrt(torch.sum(weight * residual ** 2, dim=1) + 1e-8)
            pooled = torch.cat([mean, std], dim=-1)
        return pooled.view(batch, -1)
"ristohinno@gmail.com"
] | ristohinno@gmail.com |
ed1eb2d435c7405e2a444c9f298c172791d36066 | ed291071decb3514b7f9f321e68fd57fb3c11ebc | /Python/594_longest-harmonious-subsequence.py | 8455403cb0f23ad8ecc904e97ec7756a470bb9ad | [] | no_license | antonylu/leetcode2 | d7b1681cc9477bb01619be26461634edbb85a4e5 | a57282895fb213b68e5d81db301903721a92d80f | refs/heads/master | 2021-11-25T01:30:56.358849 | 2021-11-19T08:32:12 | 2021-11-19T08:32:12 | 130,139,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,234 | py | """
https://leetcode.com/problems/longest-harmonious-subsequence/description/
We define a harmonious array is an array where the difference between its maximum value and its minimum value is exactly 1.
Now, given an integer array, you need to find the length of its longest harmonious subsequence among all its possible subsequences.
Example 1:
Input: [1,3,2,2,5,2,3,7]
Output: 5
Explanation: The longest harmonious subsequence is [3,2,2,2,3].
Note: The length of the input array will not exceed 20,000.
"""
class Solution(object):
    def findLHS(self, nums):
        """
        Return the length of the longest harmonious subsequence of nums.

        A harmonious array is one whose maximum and minimum differ by
        exactly 1, so the answer is the largest count[v] + count[v + 1]
        over all values v that occur together with v + 1.

        O(n) time, O(n) extra space.

        :type nums: List[int]
        :rtype: int
        """
        from collections import Counter
        counts = Counter(nums)
        # Each candidate keeps every occurrence of some value v plus every
        # occurrence of v + 1; the leading 0 covers the no-pair case.
        lengths = [freq + counts[value + 1]
                   for value, freq in counts.items()
                   if value + 1 in counts]
        return max([0] + lengths)
if __name__ == '__main__':
s = Solution()
tc = [ [1,3,2,2,5,2,3,7] ]
ans = [ 5 ]
for i in range(len(tc)):
r = s.findLHS(tc[i])
print (r)
assert(r == ans[i])
| [
"w3back@gmail.com"
] | w3back@gmail.com |
49d2f10073d062a79cd03a3fbbd8320f30ec6ca1 | 685a1e32643bdcc3d1ba1fbd60521b5de4935e46 | /anillimaye/conftest.py | f5a93c9d28fd7088c06d2113c3f4b0671ec6b6fb | [] | no_license | beingtmk/anillimaye | 40a3213ec0f14c2d29f4481c860e418068a0137d | 7799b8b82c7e8c52301b29c8adf056767521f3bf | refs/heads/master | 2020-08-13T17:17:47.473380 | 2019-12-05T11:48:26 | 2019-12-05T11:48:26 | 215,006,896 | 1 | 1 | null | 2019-10-17T11:01:51 | 2019-10-14T09:50:50 | Python | UTF-8 | Python | false | false | 424 | py | import pytest
from django.conf import settings
from django.test import RequestFactory
from anillimaye.users.tests.factories import UserFactory
@pytest.fixture(autouse=True)
def media_storage(settings, tmpdir):
settings.MEDIA_ROOT = tmpdir.strpath
@pytest.fixture
def user() -> settings.AUTH_USER_MODEL:
return UserFactory()
@pytest.fixture
def request_factory() -> RequestFactory:
return RequestFactory()
| [
"beingtmk@gmail.com"
] | beingtmk@gmail.com |
b930827b1fac2672459bbc2f93e7763e9622631d | 72c90301d4753c3d1534473196c6cb0b2f923bc8 | /tests/clientlib_test.py | 8e85e6c445f6488cad4b3951ea4bbf206afda340 | [
"MIT"
] | permissive | KevinHock/pre-commit | ec5ab3725fe6678b16abb0978a7414de9babba3f | ab47d08a38c67d6e974295fb58af753b4e8930ad | refs/heads/master | 2021-05-06T23:49:28.970479 | 2017-11-09T02:04:13 | 2017-11-09T02:04:34 | 110,043,358 | 3 | 2 | null | 2017-11-08T23:42:14 | 2017-11-08T23:42:14 | null | UTF-8 | Python | false | false | 6,418 | py | from __future__ import unicode_literals
import pytest
from pre_commit import schema
from pre_commit.clientlib import check_language
from pre_commit.clientlib import check_type_tag
from pre_commit.clientlib import CONFIG_HOOK_DICT
from pre_commit.clientlib import CONFIG_SCHEMA
from pre_commit.clientlib import is_local_repo
from pre_commit.clientlib import MANIFEST_SCHEMA
from pre_commit.clientlib import validate_config_main
from pre_commit.clientlib import validate_manifest_main
from testing.util import get_resource_path
def is_valid_according_to_schema(obj, obj_schema):
    """Return True when obj validates against obj_schema, False otherwise."""
    try:
        schema.validate(obj, obj_schema)
    except schema.ValidationError:
        return False
    else:
        return True
@pytest.mark.parametrize('value', ('not a language', 'python3'))
def test_check_language_failures(value):
    # Unknown languages (and version-suffixed ones) must be rejected.
    with pytest.raises(schema.ValidationError):
        check_language(value)
@pytest.mark.parametrize('value', ('definitely-not-a-tag', 'fiel'))
def test_check_type_tag_failures(value):
    # Unknown file-type tags (including near-miss typos) must be rejected.
    with pytest.raises(schema.ValidationError):
        check_type_tag(value)
@pytest.mark.parametrize('value', ('python', 'node', 'pcre'))
def test_check_language_ok(value):
    # Supported languages validate without raising.
    check_language(value)
def test_is_local_repo():
    # A repo entry keyed 'local' is recognized as an in-config repository.
    assert is_local_repo({'repo': 'local'})
@pytest.mark.parametrize(
    ('args', 'expected_output'),
    (
        # Exit status 0 only for a readable, parseable, schema-valid config.
        (['.pre-commit-config.yaml'], 0),
        (['non_existent_file.yaml'], 1),
        ([get_resource_path('valid_yaml_but_invalid_config.yaml')], 1),
        ([get_resource_path('non_parseable_yaml_file.notyaml')], 1),
    ),
)
def test_validate_config_main(args, expected_output):
    assert validate_config_main(args) == expected_output
@pytest.mark.parametrize(
    ('config_obj', 'expected'), (
        # The top level must be a map, not a list.
        ([], False),
        # Minimal valid config: repo + sha + one hook with a files pattern.
        (
            {'repos': [{
                'repo': 'git@github.com:pre-commit/pre-commit-hooks',
                'sha': 'cd74dc150c142c3be70b24eaf0b02cae9d235f37',
                'hooks': [{'id': 'pyflakes', 'files': '\\.py$'}],
            }]},
            True,
        ),
        # Hook-level 'args' list is allowed.
        (
            {'repos': [{
                'repo': 'git@github.com:pre-commit/pre-commit-hooks',
                'sha': 'cd74dc150c142c3be70b24eaf0b02cae9d235f37',
                'hooks': [
                    {
                        'id': 'pyflakes',
                        'files': '\\.py$',
                        'args': ['foo', 'bar', 'baz'],
                    },
                ],
            }]},
            True,
        ),
        # A non-string 'exclude' value must fail validation.
        (
            {'repos': [{
                'repo': 'git@github.com:pre-commit/pre-commit-hooks',
                'sha': 'cd74dc150c142c3be70b24eaf0b02cae9d235f37',
                'hooks': [
                    {
                        'id': 'pyflakes',
                        'files': '\\.py$',
                        # Exclude pattern must be a string
                        'exclude': 0,
                        'args': ['foo', 'bar', 'baz'],
                    },
                ],
            }]},
            False,
        ),
    ),
)
def test_config_valid(config_obj, expected):
    ret = is_valid_according_to_schema(config_obj, CONFIG_SCHEMA)
    assert ret is expected
def test_config_with_local_hooks_definition_fails():
    # A 'local' repo must not carry a 'sha' key -- its hooks are defined
    # inline, not pinned to a revision.
    config_obj = {'repos': [{
        'repo': 'local',
        'sha': 'foo',
        'hooks': [{
            'id': 'do_not_commit',
            'name': 'Block if "DO NOT COMMIT" is found',
            'entry': 'DO NOT COMMIT',
            'language': 'pcre',
            'files': '^(.*)$',
        }],
    }]}
    with pytest.raises(schema.ValidationError):
        schema.validate(config_obj, CONFIG_SCHEMA)
# NOTE(review): the two parametrized configs below are identical, so the
# second case adds no coverage -- possibly a leftover from an edited pair.
@pytest.mark.parametrize(
    'config_obj', (
        {'repos': [{
            'repo': 'local',
            'hooks': [{
                'id': 'arg-per-line',
                'name': 'Args per line hook',
                'entry': 'bin/hook.sh',
                'language': 'script',
                'files': '',
                'args': ['hello', 'world'],
            }],
        }]},
        {'repos': [{
            'repo': 'local',
            'hooks': [{
                'id': 'arg-per-line',
                'name': 'Args per line hook',
                'entry': 'bin/hook.sh',
                'language': 'script',
                'files': '',
                'args': ['hello', 'world'],
            }],
        }]},
    ),
)
def test_config_with_local_hooks_definition_passes(config_obj):
    # Fully inline local hook definitions validate without raising.
    schema.validate(config_obj, CONFIG_SCHEMA)
def test_config_schema_does_not_contain_defaults():
    """Due to the way our merging works, if this schema has any defaults they
    will clobber potentially useful values in the backing manifest. #227
    """
    # schema.Optional entries carry defaults; none may appear in this dict.
    for item in CONFIG_HOOK_DICT.items:
        assert not isinstance(item, schema.Optional)
@pytest.mark.parametrize(
    ('args', 'expected_output'),
    (
        # Exit status 0 only for a readable, parseable, schema-valid manifest.
        (['.pre-commit-hooks.yaml'], 0),
        (['non_existent_file.yaml'], 1),
        ([get_resource_path('valid_yaml_but_invalid_manifest.yaml')], 1),
        ([get_resource_path('non_parseable_yaml_file.notyaml')], 1),
    ),
)
def test_validate_manifest_main(args, expected_output):
    assert validate_manifest_main(args) == expected_output
@pytest.mark.parametrize(
    ('manifest_obj', 'expected'),
    (
        # The top level must be a list of hook dicts; an empty list is invalid.
        ([], False),
        # Minimal valid hook definition.
        (
            [{
                'id': 'a',
                'name': 'b',
                'entry': 'c',
                'language': 'python',
                'files': r'\.py$',
            }],
            True,
        ),
        # Optional language_version is accepted.
        (
            [{
                'id': 'a',
                'name': 'b',
                'entry': 'c',
                'language': 'python',
                'language_version': 'python3.4',
                'files': r'\.py$',
            }],
            True,
        ),
        (
            # A regression in 0.13.5: always_run and files are permissible
            # together (but meaningless). In a future version upgrade this to
            # an error
            [{
                'id': 'a',
                'name': 'b',
                'entry': 'c',
                'language': 'python',
                'files': '',
                'always_run': True,
            }],
            True,
        ),
    ),
)
def test_valid_manifests(manifest_obj, expected):
    ret = is_valid_according_to_schema(manifest_obj, MANIFEST_SCHEMA)
    assert ret is expected
| [
"asottile@umich.edu"
] | asottile@umich.edu |
8e22fb42f90d570c97b11d0307980d6995e1e0d3 | fbc29f0d9d0d6ba0c76c57d91cfad4a4cfa97932 | /Utility Scripting and System Administration/Finding_Files_by_Name.py | b598b8cbb5d1378a4e37cc66ab01eb681e2ac70e | [] | no_license | Lisolo/Python-Cookbook | b453b309eb9d4b9af644d35e8ec9f8ad31c091c1 | e9dd792c32624899bad43fa0f82bdb89f2422e0e | refs/heads/master | 2016-09-06T02:15:33.729560 | 2014-10-24T14:00:24 | 2014-10-24T14:00:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,105 | py | # coding=utf-8
"""
Problem
You need to write a script that involves finding files, like a file renaming script or a log archiver utility,
but you’d rather not have to call shell utilities from within your Python script,
or you want to provide specialized behavior not easily available by "shelling out."
Solution
To search for files, use the os.walk() function, supplying it with the top-level directory.
Here is an example of a function that finds a specific filename and prints out the full path of all matches:
"""
#!/usr/bin/env python3.3
import os
import sys
def findfile(start, name):
for relpath, dirs, files in os.walk(start):
if name in files:
full_path = os.path.join(start, relpath, name)
print(os.path.normpath(os.path.abspath(full_path)))
if __name__ == '__main__':
findfile(sys.argv[1], sys.argv[2])
"""
Save this script as findfile.py and run it from the command line,
feeding in the starting point and the name as positional arguments, like this:
bash % ./findfile.py . myfile.txt
"""
"""
Discussion
The os.walk() method traverses the directory hierarchy for us, and for each directory it enters, it returns a 3-tuple,
containing the relative path to the directory it’s inspecting, a list containing all of the directory names in that directory,
and a list of filenames in that directory.
For each tuple, you simply check if the target filename is in the files list.
If it is, os.path.join() is used to put together a path. To avoid the possibility of weird looking paths like ././foo//bar,
two additional functions are used to fix the result. The first is os.path.abspath(),
which takes a path that might be relative and forms the absolute path, and the second is os.path.normpath(),
which will normalize the path, thereby resolving issues with double slashes, multiple references to the current directory, and so on.
Although this script is pretty simple compared to the features of the find utility found on UNIX platforms,
it has the benefit of being cross-platform. Furthermore,
a lot of additional functionality can be added in a portable manner without much more work. To illustrate,
here is a function that prints out all of the files that have a recent modification time:
#!/usr/bin/env python3.3
import os
import sys
import time
def modified_within(top, seconds):
now = time.time()
for path, dirs, files in os.walk(top):
for name in files:
fullpath = os.path.join(path, name)
if os.path.exists(fullpath):
mtime = os.path.getmtime(fullpath)
if mtime > (now - seconds):
print(fullpath)
if __name__ == '__main__':
import sys
if len(sys.argv) != 3:
print('Usage: {} dir seconds'.format(sys.argv[0]))
raise SystemExit(1)
modified_within(sys.argv[1], float(sys.argv[2]))
"""
"""
It wouldn’t take long for you to build far more complex operations on top of this little function using various features of the os,
os.path, glob, and similar modules. See Recipes and for related recipes.
""" | [
"iamsoloa@gmail.com"
] | iamsoloa@gmail.com |
9fc4afdce1677636caaab75e822230cf6fee86fa | abba8b8b92125735ebff2f5f783870f80c27bd1f | /restful/hawkeye/sqlaudit/migrations/0016_audit_job_order_by.py | f5771775bdf3af5e19c16127cc0d362596261fd1 | [] | no_license | zsprn123/yunqu | 25a5463aaece2d3f8749c6ef588ad4fcb3651360 | af43f8b42129be5be82db2607c40480028057273 | refs/heads/master | 2022-12-22T04:37:59.989122 | 2018-08-30T07:59:41 | 2018-08-30T07:59:41 | 146,715,198 | 0 | 2 | null | 2022-12-08T00:45:43 | 2018-08-30T07:52:07 | Roff | UTF-8 | Python | false | false | 747 | py | # uncompyle6 version 3.2.3
# Python bytecode 3.6 (3379)
# Decompiled from: Python 2.7.5 (default, Jul 13 2018, 13:06:57)
# [GCC 4.8.5 20150623 (Red Hat 4.8.5-28)]
# Embedded file name: ./sqlaudit/migrations/0016_audit_job_order_by.py
# Compiled at: 2018-08-23 19:33:14
# Size of source mod 2**32: 482 bytes
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the nullable free-text 'order_by' column to the audit_job model.

    NOTE(review): this file is uncompyle6 output (see header) -- formatting
    differs from makemigrations originals; code left byte-identical.
    """

    dependencies = [
        ('sqlaudit', '0015_audit_rule_modifiable')]
    operations = [
        migrations.AddField(model_name='audit_job',
          name='order_by',
          field=models.CharField(blank=True, max_length=100, null=True))]
| [
"you@example.com"
] | you@example.com |
cd22488a350f2b43cb16bd6a6f611cf67a94ffae | b39b0625795b0640a6a68151f2012ce139f423b8 | /iaas/swagger_client/models/about.py | 334b7e2cf81e71d74af5081dceae3c76a5550981 | [] | no_license | darrylcauldwell/casCodegen | 8e82b1f08e8260482996aec3d8be10934a65dd03 | 1f1ff9ab8a33102bcfcb8be276d51992d96bcb61 | refs/heads/master | 2020-07-27T14:42:28.550855 | 2019-09-17T18:30:28 | 2019-09-17T18:30:28 | 209,127,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,599 | py | # coding: utf-8
"""
VMware Cloud Assembly IaaS API
A multi-cloud IaaS API for Cloud Automation Services # noqa: E501
OpenAPI spec version: 2019-01-15
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.models.api_description import ApiDescription # noqa: F401,E501
class About(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'supported_apis': 'list[ApiDescription]',
'latest_api_version': 'str'
}
attribute_map = {
'supported_apis': 'supportedApis',
'latest_api_version': 'latestApiVersion'
}
def __init__(self, supported_apis=None, latest_api_version=None): # noqa: E501
"""About - a model defined in Swagger""" # noqa: E501
self._supported_apis = None
self._latest_api_version = None
self.discriminator = None
self.supported_apis = supported_apis
self.latest_api_version = latest_api_version
@property
def supported_apis(self):
"""Gets the supported_apis of this About. # noqa: E501
A collection of all currently supported api versions. # noqa: E501
:return: The supported_apis of this About. # noqa: E501
:rtype: list[ApiDescription]
"""
return self._supported_apis
@supported_apis.setter
def supported_apis(self, supported_apis):
"""Sets the supported_apis of this About.
A collection of all currently supported api versions. # noqa: E501
:param supported_apis: The supported_apis of this About. # noqa: E501
:type: list[ApiDescription]
"""
if supported_apis is None:
raise ValueError("Invalid value for `supported_apis`, must not be `None`") # noqa: E501
self._supported_apis = supported_apis
@property
def latest_api_version(self):
"""Gets the latest_api_version of this About. # noqa: E501
The latest version of the API in yyyy-MM-dd format (UTC). # noqa: E501
:return: The latest_api_version of this About. # noqa: E501
:rtype: str
"""
return self._latest_api_version
@latest_api_version.setter
def latest_api_version(self, latest_api_version):
"""Sets the latest_api_version of this About.
The latest version of the API in yyyy-MM-dd format (UTC). # noqa: E501
:param latest_api_version: The latest_api_version of this About. # noqa: E501
:type: str
"""
if latest_api_version is None:
raise ValueError("Invalid value for `latest_api_version`, must not be `None`") # noqa: E501
self._latest_api_version = latest_api_version
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(About, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
    """Returns the string representation of the model"""
    # Render via to_dict() so nested models are pretty-printed too.
    model_as_dict = self.to_dict()
    return pprint.pformat(model_as_dict)
def __repr__(self):
    """For `print` and `pprint`"""
    # repr() of the model is its pretty-printed dict form via to_str().
    return self.to_str()
def __eq__(self, other):
    """Returns true if both objects are equal"""
    # Objects of a different type never compare equal; otherwise compare
    # every instance attribute.
    return isinstance(other, About) and self.__dict__ == other.__dict__
def __ne__(self, other):
    """Returns true if both objects are not equal"""
    # Defined as the strict negation of == (which dispatches to __eq__).
    return not self == other
| [
"dcauldwell@dcauldwell-a01.vmware.com"
] | dcauldwell@dcauldwell-a01.vmware.com |
d53fe1614b6fb99c58bfcf7d574886de8ea4f545 | 8cb8bfd2dae516612251039e0632173ea1ea4c8a | /modules/analyzes/machineEngine/roomtype/models.py | 8ad4ce9eb8cfffd190d4ad6c57654fa3fdbbccf6 | [] | no_license | nyzsirt/lift-prod | 563cc70700d26a5812a1bce0bd9795998dce6e99 | 9a5f28e49ad5e80e422a5d5efee77a2d0247aa2b | refs/heads/master | 2020-04-22T01:05:42.262876 | 2019-02-09T13:31:15 | 2019-02-09T13:31:15 | 170,003,361 | 1 | 0 | null | 2019-02-10T17:11:50 | 2019-02-10T17:11:50 | null | UTF-8 | Python | false | false | 690 | py | import datetime
from mongoengine import Document
from mongoengine import StringField
from mongoengine import DateTimeField
from mongoengine import ObjectIdField
from mongoengine import ReferenceField
from modules.organization.models import Organization
class EngineRoomType(Document):
    """Mongoengine document describing a lift machine-room type."""

    # Audit metadata: creation/modification timestamps and the users involved.
    # Note: default is the *callable* utcnow, so it is evaluated per document.
    _created_date = DateTimeField(default=datetime.datetime.utcnow)
    _key_created_user = ObjectIdField()
    _last_modified_date = DateTimeField(default=datetime.datetime.utcnow)
    _key_last_modified_user = ObjectIdField()
    _key_owner_user = ObjectIdField()
    # reverse_delete_rule=2 corresponds to CASCADE in mongoengine: deleting
    # the referenced Organization removes these documents as well.
    _key_organization = ReferenceField(Organization, required=True, reverse_delete_rule=2)
    # Required free-form label naming the room type.
    room_type = StringField(required=True)
| [
"mutlu.erdem@soft-nec.com"
] | mutlu.erdem@soft-nec.com |
1778ed78624eff11d3904af6036e7ff72823d4e4 | 2354fbbc1b6497d3a5f78e12783fe760e43f99fb | /LeetCode Problems/Design/Insert Delete GetRandom.py | ea5730b4603d533c4ca4a9e6a4381eeeaf9d90af | [] | no_license | GZHOUW/Algorithm | 34ee3650a5fad1478fb3922ea69ccafc134520c9 | 7eddbc93a237d1d5cabcdc67806b01ff55ea8562 | refs/heads/master | 2021-03-27T07:57:31.247576 | 2021-01-06T19:53:38 | 2021-01-06T19:53:38 | 247,803,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,183 | py | '''
Design a data structure that supports all following operations in average O(1) time.
insert(val): Inserts an item val to the set if not already present.
remove(val): Removes an item val from the set if present.
getRandom: Returns a random element from current set of elements. Each element must have the same probability of being returned.
Example:
// Init an empty set.
RandomizedSet randomSet = new RandomizedSet();
// Inserts 1 to the set. Returns true as 1 was inserted successfully.
randomSet.insert(1);
// Returns false as 2 does not exist in the set.
randomSet.remove(2);
// Inserts 2 to the set, returns true. Set now contains [1,2].
randomSet.insert(2);
// getRandom should return either 1 or 2 randomly.
randomSet.getRandom();
// Removes 1 from the set, returns true. Set now contains [2].
randomSet.remove(1);
// 2 was already in the set, so return false.
randomSet.insert(2);
// Since 2 is the only number in the set, getRandom always return 2.
randomSet.getRandom();
'''
class RandomizedSet:
    """Set supporting insert, remove and getRandom in O(1) average time.

    A list stores the elements (O(1) random pick); a dict maps each element
    to its index in the list so removal can swap-and-pop in O(1).  The
    original set-based version scanned the whole set in getRandom (O(n)) and
    never imported ``random``, so getRandom raised NameError at runtime.
    """

    def __init__(self):
        """
        Initialize your data structure here.
        """
        self.items = []      # element storage; enables O(1) random choice
        self.index_of = {}   # element -> its position in self.items

    def insert(self, val):
        """
        Inserts a value to the set. Returns true if the set did not already contain the specified element.
        """
        if val in self.index_of:
            return False
        self.index_of[val] = len(self.items)
        self.items.append(val)
        return True

    def remove(self, val):
        """
        Removes a value from the set. Returns true if the set contained the specified element.
        """
        pos = self.index_of.pop(val, None)
        if pos is None:
            return False
        last = self.items.pop()
        if pos < len(self.items):
            # val was not the last element: move the former last element
            # into the vacated slot and update its recorded index.
            self.items[pos] = last
            self.index_of[last] = pos
        return True

    def getRandom(self):
        """
        Get a random element from the set.
        """
        import random  # local import: the original module never imported random
        return random.choice(self.items)
# Your RandomizedSet object will be instantiated and called as such:
# obj = RandomizedSet()
# param_1 = obj.insert(val)
# param_2 = obj.remove(val)
# param_3 = obj.getRandom()
| [
"noreply@github.com"
] | GZHOUW.noreply@github.com |
66d7e2a9ecbd1f2f21935027597070475f279214 | 2729999511025ae93a46a402e8611000e63fc5b8 | /apps/useradmin/src/useradmin/organization.py | 3f34820b743163d309a0ae770f5d94bf9d0b919d | [
"Apache-2.0"
] | permissive | happydentist/hue | 19cb3abfa42e70844ef609b346c195e3a99a48b0 | 9928284e284f9a0586bd2080932f4b25bb5d8708 | refs/heads/master | 2020-12-27T10:40:28.312883 | 2020-01-31T03:14:18 | 2020-01-31T15:45:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,908 | py | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from crequest.middleware import CrequestMiddleware
from desktop.conf import ENABLE_ORGANIZATIONS
def default_organization():
    """Return the fallback 'default' Organization, creating it on first use."""
    # Imported locally to avoid an import cycle with useradmin.models.
    from useradmin.models import Organization

    organization, _created = Organization.objects.get_or_create(name='default', domain='default')
    return organization
def get_user_request_organization():
    """Organization of the current authenticated request user, else the default."""
    request = CrequestMiddleware.get_request()
    if request and hasattr(request, 'user') and request.user.is_authenticated():
        return request.user.organization
    return default_organization()
def _fitered_queryset(queryset, by_owner=False):
    """Restrict `queryset` to the requesting user's organization.

    Returns the queryset unchanged when organizations are disabled or when
    there is no fully-loaded authenticated user on the current request.
    (The misspelled name is kept because callers import it as-is.)
    """
    request = CrequestMiddleware.get_request()

    if not ENABLE_ORGANIZATIONS.get():
        return queryset
    # Avoid infinite recursion on very first retrieval of the user:
    # the lazy user wrapper (_wrapped) must already be resolved.
    if not (request and hasattr(request, 'user') and hasattr(request.user, '_wrapped')):
        return queryset
    if type(request.user._wrapped) is object or not request.user.is_authenticated():
        return queryset

    field = 'owner__organization' if by_owner else 'organization'
    return queryset.filter(**{field: request.user.organization})
| [
"romain.rigaux@gmail.com"
] | romain.rigaux@gmail.com |
b5367669021499867b783804a735c7fee14b1986 | 90a1aa497ec53fa87bc31cd5101ad55adb22cddb | /cython/basic_demo/main.py | 76bf101c1dca320a8313f60a8bac0c50e6eafd19 | [] | no_license | ybdesire/pylearn | 39821e3e5cb61c021afc7af2052e0de7077961e2 | 400e525c0529bea6da74aab9bc86fe5e26549d32 | refs/heads/master | 2023-02-04T02:08:44.352846 | 2023-01-28T09:28:34 | 2023-01-28T09:28:34 | 79,337,563 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 246 | py | # environment: linx
import time
import pyximport
pyximport.install(
reload_support=True)
import calc
start_time = time.time()
for i in range(10000000):
calc.fun(i)
print('costed {} seconds'.format(time.time() - start_time))
| [
"ybdesire@gmail.com"
] | ybdesire@gmail.com |
ac01c55d0bebdd3d33f68faffffadc3fbd7e7962 | 60fedc4e31e70dfbec2542751aa0a02ba21a54a3 | /tests/test_registrar_name/test_authenticate.py | 56611d3122796d56549e0656e85ec682d4ccc038 | [
"MIT"
] | permissive | Robpol86/UnofficialDDNSnix | 791c2b1f801918c775e2ebc228a3385a253a66ae | de3e63927da5e76b685aeeabdb43ef36ecefbba5 | refs/heads/master | 2021-08-19T05:14:59.026176 | 2016-01-09T05:32:05 | 2016-01-09T05:32:05 | 15,039,836 | 3 | 3 | MIT | 2020-04-17T07:46:53 | 2013-12-09T06:25:00 | Python | UTF-8 | Python | false | false | 7,437 | py | #!/usr/bin/env python2.6
import textwrap
import pytest
import time
from tests.test_registrar_name.test_request_json import initialize_simulation
def _heavy_lifting(response, log_file, session, expected_exc, capsys, stdout_expected, stderr_expected, log_expected):
    """Drive session.authenticate() against a simulated `response` and assert
    the raised exception message, captured stdout/stderr, and the new log
    output written after this call."""
    initialize_simulation(response)

    # Remember where the log file currently ends so only fresh output is checked.
    with open(log_file.name, 'r') as handle:
        handle.seek(0, 2)
        log_start = handle.tell()

    with pytest.raises(session.RegistrarException) as excinfo:
        session.authenticate()
    assert expected_exc == str(excinfo.value)

    out_actual, err_actual = capsys.readouterr()
    assert stdout_expected == out_actual
    assert stderr_expected == err_actual

    with open(log_file.name, 'r') as handle:
        handle.seek(log_start)
        log_actual = handle.read(10240)
    assert log_expected == log_actual
def test_authenticate_missing_json_key(session, log_file, capsys):
    """authenticate() must raise when the response JSON lacks 'session_token'."""
    response = '{"result":{"code":100,"message":"Command Successful"},"bar":["baz", null, 1.0, 2]}'
    json = "{u'bar': [u'baz', None, 1.0, 2], u'result': {u'message': u'Command Successful', u'code': 100}}"
    expected_exc = "'session_token' not in JSON."
    stdout_expected = textwrap.dedent("""\
        Method authenticate start.
        Opening connection to {url}
        Response: {response}
        JSON: {json}
        """.format(url="http://127.0.0.1/login", response=response, json=json))
    stderr_expected = ''
    # Timestamp must match the logger's format for the log comparison below.
    timestamp = time.strftime("%Y-%m-%dT%H:%M:%S")
    log_expected = textwrap.dedent("""\
        {ts} DEBUG registrar_base.authenticate Method authenticate start.
        {ts} DEBUG registrar_base._request_json Opening connection to {url}
        {ts} DEBUG registrar_base.authenticate Response: {response}
        {ts} DEBUG registrar_base.authenticate JSON: {json}
        """.format(url="http://127.0.0.1/login", response=response, json=json, ts=timestamp))
    _heavy_lifting(response, log_file, session, expected_exc, capsys, stdout_expected, stderr_expected, log_expected)
def test_authenticate_missing_json_value(session, log_file, capsys):
    """authenticate() must raise when 'session_token' is present but empty."""
    response = '{"result":{"code":100,"message":"Command Successful"},"bar":["baz", null, 1.0, 2], "session_token":""}'
    json = "{u'bar': [u'baz', None, 1.0, 2], u'result': {u'message': u'Command Successful', u'code': 100}, u'session_token': u''}"
    expected_exc = "'session_token' is invalid."
    stdout_expected = textwrap.dedent("""\
        Method authenticate start.
        Opening connection to {url}
        Response: {response}
        JSON: {json}
        """.format(url="http://127.0.0.1/login", response=response, json=json))
    stderr_expected = ''
    # Timestamp must match the logger's format for the log comparison below.
    timestamp = time.strftime("%Y-%m-%dT%H:%M:%S")
    log_expected = textwrap.dedent("""\
        {ts} DEBUG registrar_base.authenticate Method authenticate start.
        {ts} DEBUG registrar_base._request_json Opening connection to {url}
        {ts} DEBUG registrar_base.authenticate Response: {response}
        {ts} DEBUG registrar_base.authenticate JSON: {json}
        """.format(url="http://127.0.0.1/login", response=response, json=json, ts=timestamp))
    _heavy_lifting(response, log_file, session, expected_exc, capsys, stdout_expected, stderr_expected, log_expected)
def test_authenticate_invalid_json_value(session, log_file, capsys):
    """authenticate() must raise when 'session_token' is a malformed value."""
    response = '{"result":{"code":100,"message":"Command Successful"},"bar":["baz", null, 1.0, 2], "session_token":"127..0.1"}'
    json = "{u'bar': [u'baz', None, 1.0, 2], u'result': {u'message': u'Command Successful', u'code': 100}, u'session_token': u'127..0.1'}"
    expected_exc = "'session_token' is invalid."
    stdout_expected = textwrap.dedent("""\
        Method authenticate start.
        Opening connection to {url}
        Response: {response}
        JSON: {json}
        """.format(url="http://127.0.0.1/login", response=response, json=json))
    stderr_expected = ''
    # Timestamp must match the logger's format for the log comparison below.
    timestamp = time.strftime("%Y-%m-%dT%H:%M:%S")
    log_expected = textwrap.dedent("""\
        {ts} DEBUG registrar_base.authenticate Method authenticate start.
        {ts} DEBUG registrar_base._request_json Opening connection to {url}
        {ts} DEBUG registrar_base.authenticate Response: {response}
        {ts} DEBUG registrar_base.authenticate JSON: {json}
        """.format(url="http://127.0.0.1/login", response=response, json=json, ts=timestamp))
    _heavy_lifting(response, log_file, session, expected_exc, capsys, stdout_expected, stderr_expected, log_expected)
def test_authenticate_bad_credentials(session, log_file, capsys):
    """authenticate() must raise on the API's authorization-error response (code 221)."""
    response = '{"result":{"code":221,"message":"Authorization Error - Username Or Ip Token Invalid"}}'
    json = "{u'result': {u'message': u'Authorization Error - Username Or Ip Token Invalid', u'code': 221}}"
    expected_exc = "Authorization Error or invalid username and/or password."
    stdout_expected = textwrap.dedent("""\
        Method authenticate start.
        Opening connection to {url}
        Response: {response}
        JSON: {json}
        """.format(url="http://127.0.0.1/login", response=response, json=json))
    stderr_expected = ''
    # Timestamp must match the logger's format for the log comparison below.
    timestamp = time.strftime("%Y-%m-%dT%H:%M:%S")
    # NOTE(review): Response/JSON lines log under registrar_name here (not
    # registrar_base as in the other tests) — presumably because the 221 code
    # is handled in the subclass; confirm against the registrar_name module.
    log_expected = textwrap.dedent("""\
        {ts} DEBUG registrar_base.authenticate Method authenticate start.
        {ts} DEBUG registrar_base._request_json Opening connection to {url}
        {ts} DEBUG registrar_name._request_json Response: {response}
        {ts} DEBUG registrar_name._request_json JSON: {json}
        """.format(url="http://127.0.0.1/login", response=response, json=json, ts=timestamp))
    _heavy_lifting(response, log_file, session, expected_exc, capsys, stdout_expected, stderr_expected, log_expected)
# noinspection PyProtectedMember
def test_authenticate_success(session, log_file, capsys):
    """A valid login response must store the session token on the session object."""
    response = '{"result":{"code":100,"message":"Command Successful"},"session_token":"2352e5c5a0127d2155377664a5543f22a70be187"}'
    json = "{u'client_ip': u'127.0.0.1', u'service': u'Name.com API Test Server', u'language': u'en', u'version': u'2.0', u'result': {u'message': u'Command Successful', u'code': 100}, u'server_date': u'2013-12-28 04:46:38'}"
    expected_token = "2352e5c5a0127d2155377664a5543f22a70be187"
    # On success the Response/JSON debug lines are not expected in stdout.
    stdout_expected = textwrap.dedent("""\
        Method authenticate start.
        Opening connection to {url}
        Method authenticate end.
        """.format(url="http://127.0.0.1/login", response=response, json=json))
    stderr_expected = ''
    # Timestamp must match the logger's format for the log comparison below.
    timestamp = time.strftime("%Y-%m-%dT%H:%M:%S")
    log_expected = textwrap.dedent("""\
        {ts} DEBUG registrar_base.authenticate Method authenticate start.
        {ts} DEBUG registrar_base._request_json Opening connection to {url}
        {ts} DEBUG registrar_base.authenticate Method authenticate end.
        """.format(url="http://127.0.0.1/login", response=response, json=json, ts=timestamp))

    initialize_simulation(response)

    # Remember where the log currently ends so only fresh output is checked.
    with open(log_file.name, 'r') as f:
        f.seek(0, 2)
        log_before_pos = f.tell()

    session.authenticate()
    # Accessing the private token directly (hence the noinspection above).
    assert expected_token == session._session_token

    stdout_actual, stderr_actual = capsys.readouterr()
    assert stdout_expected == stdout_actual
    assert stderr_expected == stderr_actual

    with open(log_file.name, 'r') as f:
        f.seek(log_before_pos)
        log_actual = f.read(10240)
    assert log_expected == log_actual
"robpol86@gmail.com"
] | robpol86@gmail.com |
8d628b240c470b3b6d14c99e92892957c4e19fec | ac33e7a30131db58f0e72c9bf1f79cd34a38d335 | /manufacturing/doctype/stability_study_report_child/stability_study_report_child.py | db1427f55fb00191d9423c79a8302e72878ad791 | [] | no_license | mbhavesh95863/erpnext | 395d545292c67cc5d6d7be3029d03245c754d984 | d6c490e4a404235abe9b4d541de1bbb53ba32949 | refs/heads/master | 2020-03-26T20:03:45.620397 | 2018-08-19T12:46:43 | 2018-08-19T12:46:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 291 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class StabilityStudyReportChild(Document):
    # Frappe child-table doctype; all behavior comes from the framework's
    # Document base class, so no custom methods are needed here.
    pass
| [
"erpnextdeveloper1@gmail.com"
] | erpnextdeveloper1@gmail.com |
ad498b8d3ca18f5d619331ca3bb62c7b5c9be603 | ca44cdd205d27fc5cfabaaa349e93afddd7c902b | /hm3/hw3_other_tasks.py | 2462346a4756a4a7109758d5642e65012f7a255f | [] | no_license | SOFIAshyn/BaseProgramming_course_Basic_Python | 8402b7c2eff570e7102ba1f9b0b6636a6f0b881a | cf4d0d204a836367ee51e329828a53072aef20e9 | refs/heads/master | 2021-10-21T08:02:35.611635 | 2019-03-03T15:46:59 | 2019-03-03T15:46:59 | 173,553,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,384 | py | def is_power_of_two(val):
"""
(int) -> bool
Determine if a number is a power of two.
>>> is_power_of_two([0])
>>> is_power_of_two("0")
>>> is_power_of_two(0)
False
>>> is_power_of_two(1)
True
>>> is_power_of_two(2)
True
>>> is_power_of_two(15)
False
>>> is_power_of_two(16)
True
"""
try:
if val > 0 and val % 2 == 0 or val == 1:
while val != 2:
if val % 2 == 0:
val /= 2
else:
break
if val == 2 or val == 1:
return True
else:
return False
else:
return False
except TypeError as err:
return None
print(is_power_of_two(24))
#
#
# def has_unique_chars(string):
# """
# (str) -> bool
#
# An algorithm to determine if a string has all unique characters.
#
# >>> has_unique_chars(None)
# False
# >>> has_unique_chars('')
# True
# >>> has_unique_chars('foo')
# False
# >>> has_unique_chars('bar')
# True
# """
# if type(string) == str:
# for letter in string:
# if string.count(letter) > 1:
# return False
# return True
# else:
# return False
#
# print(has_unique_chars("foo"))
#
# def compress(string):
# """
# (str) -> str
#
# Compress a string such that 'AAABCCDDDD' becomes 'A3BC2D4'.
# Only compress the string if it saves space.
#
# >>> compress(None)
#
# >>> compress('')
# ''
# >>> compress('AABBCC')
# 'AABBCC'
# >>> compress('AAABCCDDDDE')
# 'A3BC2D4E'
# >>> compress('BAAACCDDDD')
# 'BA3C2D4'
# >>> compress('AAABAACCDDDD')
# 'A3BA2C2D4'
# """
# if type(string) == str:
# prev = ""
# count = 1
# new_str = ""
# for letter in string + " ":
# if letter == prev:
# count += 1
# else:
# if count != 1:
# new_str += prev + str(count)
# else:
# new_str += prev
# count = 1
# prev = letter
# if len(new_str) == len(string):
# new_str = string
# return new_str
# else:
# return None
#
# print(compress(None))
#
#
import doctest
doctest.testmod()
| [
"sophiya2petryshyn@gmail.com"
] | sophiya2petryshyn@gmail.com |
f857702b7d0e2829e1575beb8990cbe95f3afd23 | 1d103214adcd3d7834ec85308e14c160df14c5f0 | /pykrylov/irlba.py | b901af8195e6411ff255e6994344609483800a65 | [] | no_license | ericmjonas/pykrylov | c41a22d82c345d3223cac9bd85e5ddfd89d8fe92 | b1022dbf07a9be601a2c23c285175281a2c48ded | refs/heads/master | 2020-05-16T20:30:14.301575 | 2014-07-29T21:20:27 | 2014-07-29T21:20:27 | 22,395,012 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,951 | py | import numpy as np
import scipy.linalg
import util
import lanczos
from util import norm
EPS = np.finfo(float).eps
def ordered_svd(X):
    """
    Return the SVD of X with singular values ordered largest to smallest.

    :param X: 2-D array to decompose.
    :return: (u, s, v) where ``v`` holds the right singular vectors as
        *columns*, i.e. ``X == u @ diag(s) @ v.T`` for square X.
    :raises ValueError: if the singular values come back out of descending
        order (defensive check; numpy documents descending order).
    """
    u, s, v = np.linalg.svd(X)
    # np.sort is ascending, so the reversed s must match it exactly when s
    # is already sorted in descending order.
    if (np.sort(s) != s[::-1]).any():
        raise ValueError("singular values are not sorted in descending order")
    return u, s, v.T
def irlba(A, K=6, largest=True, adjust = 3, aug=None, disps=0, maxit=1000, m_b=20,
          reorth_two = False, tol=1e-6, V0=None):
    """
    This is a port of IRLBA.m following the code there very closely

    Implicitly Restarted Lanczos Bidiagonalization: compute K approximate
    singular triplets of the linear operator A.

    :param A: operator exposing dims() -> (m, n); passed to
        lanczos.ablanzbd for the bidiagonalization steps.
    :param K: number of singular triplets wanted.
    :param largest: True for the largest singular values; the
        smallest-value (interchange) path is not implemented yet.
    :param adjust: extra triplets computed initially to aid convergence.
    :param aug: augmentation scheme, 'RITZ' or 'HARM'; chosen from
        `largest` when None.
    :param disps: display flag kept for parity with IRLBA.m (unused here).
    :param maxit: maximum number of restart iterations.
    :param m_b: size of the Lanczos bidiagonal blocks.
    :param reorth_two: force two-sided reorthogonalization.
    :param tol: convergence tolerance (floored at machine epsilon).
    :param V0: optional starting basis; its columns seed V.
    :return: (u, s, v) with K_org columns/values each.
    """
    # FIXME do interchange stuff
    m, n = A.dims()
    # interchange m and n so that size(A*A) = min(m, n)
    # avoids finding zero values when searching for the smallest singular values
    interchange = False
    if n > m and largest == False:
        t = m
        m = n
        n = t
        interchange = True
        raise Exception("Don't do interchange yet")

    W = np.zeros((m, m_b))
    F = np.zeros((n, 1))
    V = np.zeros((n, m_b)) # Preallocate for V
    if V0 == None:
        # Deterministic normalized starting vector (1..n scaled to sum 1).
        V[:, 0] = np.arange(1, n+1)
        V[:, 0] = V[:, 0] / np.sum(V[:, 0]) # np.random.normal(0, 1, n)
    else:
        V[:, :V0.shape[1]] = V0

    # increase the number of desired values by adjust to help increase convergence.
    # K is re-adjusted as vectors converged. This is only an initial value of K
    K_org = K
    K += adjust

    # sanity checking for input values
    if K <= 0:
        raise Exception("K must be a positive Value")
    if K > min(n, m):
        raise Exception("K must be less than min(n, m) + %d" % adjust)
    if m_b <= 1:
        raise Exception("M_B must be > 1")

    # FIXME clean up parameters A LOT
    if aug == None:
        if largest == True:
            aug = 'RITZ'
        else:
            aug = 'HARM'

    # set tolerance to machine precision
    tol = max(tol, EPS)

    # begin initialization
    B = []
    Bsz = []
    conv = False
    EPS23 = EPS**(2/3.0)
    iteration = 0
    J = 0
    mprod = 0
    R_F = []
    SQRTEPS = np.sqrt(EPS)

    Smax = 1 # holds the maximum value of all computed singular values of B est. ||A||_2
    Smin = [] # holds the minimum value of all computed singular values of B est. cond(A)
    SVTol = min(SQRTEPS, tol) # tolerance to determine whether a singular value has converged

    S_B = [] # singular values of B
    U_B = [] # Left singular vectors of B
    V_B = [] # right singular vectors of B
    V_B_last = [] # holds the last row of the modified V_B
    S_B2 = [] # singular values of [B ||F||]
    U_B2 = [] # left singular vectors of [B ||F||]
    V_B2 = [] # right signular vectors of [b || F||]

    while iteration < maxit:
        # Extend the Lanczos bidiagonalization by m_b - K steps.
        V, W, F, B, mprod = lanczos.ablanzbd(A, V, W, F, B, K,
                                             interchange, m_b, n, m, SVTol*Smax,
                                             reorth_two, iteration)

        # determine the size of the bidiagonal matrix B
        Bsz = B.shape[0]

        # compute the norm of the vector F, and normalize F
        R_F = norm(F)
        F = F/R_F

        # compute singular triplets of B
        U_B, S_B, V_B = ordered_svd(B)

        # estimate ||A|| using the largest singular value ofer all
        # all iterations and estimate the cond(A) using approximations
        # to the largest and smallest singular values. If a small singular value
        # is less than sqrteps use only Rtiz vectors
        # to augment and require two-sided reorthogonalization
        if iteration == 0:
            Smax = S_B[0];
            Smin = S_B[-1]
        else:
            Smax = max(Smax, S_B[0])
            Smin = min(Smin, S_B[-1])
        Smax = max(EPS23, Smax)
        if Smin/Smax < SQRTEPS:
            reorth_two = True
            aug = 'RITZ'

        # re-order the singular values if we're looking for the smallest ones
        if not largest:
            U_B = U_B[:, ::-1]
            S_B = S_B[::-1]
            V_B = V_B[:, ::-1]

        # compute the residuals
        R = np.dot(R_F, U_B[-1,:])

        # convergest tests and displays
        conv, U_B, S_B, V_B, K = convtests(Bsz, disps, tol, K_org, U_B, S_B, V_B,
                                           abs(R), iteration,
                                           K, SVTol, Smax)

        if conv: # all singular values within tolerance, return !
            break # yay

        if iteration > maxit:
            break # boo

        # compute starting vectors and first block:
        if aug == "HARM":
            # update the SVD of B to be the SVD of [B ||F||F E_m]
            U_B2, S_B, V_B2 = ordered_svd(np.c_[np.diag(S_B), R.T])
            if not largest:
                U_B2 = U_B2[:, :Bsz]
                V_B2 = V_B2[:, :Bsz]
                S_B = S_B[:Bsz]
                U_B2 = U_B2[:, ::-1]
                S_B = S_B[::-1]
                V_B2 = V_B2[:, ::-1]

            U_B = np.dot(U_B, U_B2)
            # Embed V_B into a (r+1)x(c+1) block with a trailing 1 on the
            # diagonal before applying V_B2.
            VB_D = np.zeros((V_B.shape[0]+1, V_B.shape[1]+1))
            VB_D[:-1, :-1] = V_B
            VB_D[-1, -1] = 1.0
            V_B = np.dot(VB_D, V_B2)
            V_B_last = V_B[-1, :K] # the last row of V_B

            int_v = scipy.linalg.solve(B, np.flipud(np.eye(Bsz, 1)))
            s = np.dot(R_F, int_v)
            V_B = V_B[:Bsz, :] + s*V_B[Bsz:, :]

            # vectors are not orthogonal
            VB_D = np.zeros((V_B.shape[0] +1, K+1))
            VB_D[:-1, :K] = V_B[:, :K]
            VB_D[:-1, K] = -s.T
            VB_D[-1, -1] = 1.0
            # Re-orthonormalize the augmented basis via QR.
            V_B, R = np.linalg.qr(VB_D)
            V[:, :(K+1)] = np.dot(np.c_[V, F], V_B)

            # upate and compute the K x K+1 part of B
            w0 = np.outer(R[:, K], V_B_last)
            w = np.triu((R[:K+1, :K] + w0).T)
            B = np.dot(np.diag(S_B[:K]), w)
        else:
            # Ritz augmentation: restart with the leading Ritz vectors plus F.
            V[:, :K] = np.dot(V, V_B[:, :K])
            V[:, K] = F
            B = np.c_[np.diag(S_B[:K]), R[:K]]

        # compute left approximate singular values
        W[:, :K] = np.dot(W, U_B[:, :K])
        iteration += 1

    # results
    if interchange:
        u = np.dot(V, V_B[:, :K_org])
        s = S_B[:K_org]
        v = np.dot(W, U_B[:, :K_org])
    else:
        u = np.dot(W, U_B[:, :K_org])
        s = S_B[:K_org]
        v = np.dot(V, V_B[:, :K_org])

    return u, s, v
def convtests(Bsz, disps, tol, K_org, U_B, S_B, V_B,
              residuals, iter, K, SVTol, Smax):
    """Convergence check for the K_org wanted singular triplets.

    Returns (converged, U_B, S_B, V_B, K): on full convergence the triplets
    are truncated to K_org columns/values; otherwise K is grown by the
    number of nearly-converged values and capped at Bsz - 3.
    """
    n_converged = np.sum(residuals[:K_org] < (tol * Smax))
    if n_converged == K_org:
        # All wanted triplets are within tolerance: truncate and report success.
        return True, U_B[:, :K_org], S_B[:K_org], V_B[:, :K_org], K

    # Grow K by the count meeting the looser SVTol threshold, capped so the
    # restart always has room inside the Bsz-dimensional basis.
    n_near = np.sum(residuals[:K_org] < (SVTol * Smax))
    K = max(K, K_org + n_near)
    if K > Bsz - 3:
        K = Bsz - 3
    return False, U_B, S_B, V_B, K
| [
"jonas@ericjonas.com"
] | jonas@ericjonas.com |
f4936ca835895113cefad9aea979f1f903045068 | f68732bc40a7a90c3a1082e4b3a4154518acafbb | /script/dbus/systemBus/appsLaunchedRecorder/002_markLaunched.py | 838803454ac39531735ca298607c545687acf651 | [] | no_license | lizhouquan1017/dbus_demo | 94238a2307e44dabde9f4a4dd0cf8ec217260867 | af8442845e722b258a095e9a1afec9dddfb175bf | refs/heads/master | 2023-02-11T19:46:27.884936 | 2021-01-08T05:27:18 | 2021-01-08T05:27:18 | 327,162,635 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,329 | py | # -*- coding: utf-8 -*-
# ***************************************************
# @Test Case ID: 002_markLaunched
# @Test Description: MarkLaunched(string desktopFile)
# 标记某个应用是否启动过
# 参数
# desktopFile: 标记该应用启动过,标记以后该应用的id就不会出现在GetNew函数返回的结构中
# 返回
# 无
# @Test Condition: 1.无
# @Test Step: 1.调用 GetNew 函数,获取已经安装但从未打开使用过的应用列表
# 2.存在从未打开过应用,调用MarkLaunched标记第一个应用,不存在则标记任意一个已存在应用,如:dde-file-manager
# @Test Result: 2.调用 GetNew 函数,获取已经安装但从未打开使用过的应用列表,被标记应用不在列表中或调用成功无报错
# @Test Remark: 只有通过launcher卸载的应用再次安装才会在从未打开使用过的应用列表中,暂无方案通过代码模拟这一过程,200901
# @Author: ut001627
# ***************************************************
import time
import pytest
from frame.base import OSBase
from aw.dbus.systemBus import appsLaunchedRecorder
class TestCase(OSBase):
def setUp(self):
self.Step("预制条件1:无")
@pytest.mark.public
def test_step(self):
self.Step("步骤1:调用 GetNew 函数,获取已经安装但从未打开使用过的应用列表")
apps_list = appsLaunchedRecorder.get_all_new_apps()
self.Step("步骤2:存在从未打开过应用,调用MarkLaunched标记第一个应用,不存在则标记任意一个已存在应用,如:dde-file-manager")
if apps_list:
appsLaunchedRecorder.markLaunched(apps_list[0])
time.sleep(5)
self.CheckPoint("调用 GetNew 函数,获取已经安装但从未打开使用过的应用列表,被标记应用不在列表中")
assert appsLaunchedRecorder.is_new_apps(apps_list[0], target=False)
else:
self.CheckPoint("调用成功无报错")
appsLaunchedRecorder.markLaunched('/usr/share/applications/dde-file-manager.desktop')
def tearDown(self):
self.Step("收尾:无")
| [
"lizhouquan@uniontech.com"
] | lizhouquan@uniontech.com |
9c51cf3d66f1cbc81907c80eb29c81a8f1ffddfe | 3af8bd42cbf1f3a6f275cc7f5299a643511b56ff | /sentiment_analysis/bert/scripts/main.py | 41e2bc53361220044c117ee01f7a1306b2b0d166 | [] | no_license | shravanc/msc_project | d54fbf6fda764038ca52d113ec5b582212f9a5bd | 9d815e2130a9c4c2ad9286a8f3471c2bf860ca93 | refs/heads/master | 2022-12-13T21:59:51.269615 | 2020-09-08T10:50:55 | 2020-09-08T10:50:55 | 276,747,991 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,291 | py | import os
import math
import datetime
from tqdm import tqdm
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow import keras
import bert
from bert import BertModelLayer
from bert.loader import StockBertConfig, map_stock_config_to_params, load_stock_weights
from bert.tokenization.bert_tokenization import FullTokenizer
import seaborn as sns
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
train_base_dir = "/home/shravan/Downloads/train/"
valid_base_dir = "/home/shravan/Downloads/valid/"
train_count = 11
def load_datasets():
    """Load the train and validation sentiment CSVs.

    Every file in the module-level `train_base_dir` / `valid_base_dir` is
    read as a two-column CSV and the per-directory frames are concatenated.

    Returns:
        (train_df, valid_df): DataFrames with columns
        ['sentences', 'polarity'].
    """
    def _read_dir(base_dir):
        # Collect all frames first and concat once: calling pd.concat inside
        # the loop (as before) re-copies the accumulated frame every
        # iteration, which is quadratic in the number of files.
        frames = [
            pd.read_csv(os.path.join(base_dir, name), sep=',',
                        names=["sentences", "polarity"])
            for name in os.listdir(base_dir)
        ]
        return pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()

    return _read_dir(train_base_dir), _read_dir(valid_base_dir)
train, test = load_datasets()
bert_abs_path = '/home/shravan/Downloads/'
bert_model_name = 'multi_cased_L-12_H-768_A-12'
bert_ckpt_dir = os.path.join(bert_abs_path, bert_model_name)
bert_ckpt_file = os.path.join(bert_ckpt_dir, "bert_model.ckpt")
bert_config_file = os.path.join(bert_ckpt_dir, "bert_config.json")
# Preprocessing
class IntentDetectionData:
    """Tokenize train/test DataFrames into padded BERT input-id arrays."""

    # Column names expected in the input DataFrames.
    DATA_COLUMN = 'sentences'
    LABEL_COLUMN = 'polarity'

    def __init__(self, train, test, tokenizer: FullTokenizer, classes, max_seq_len):
        self.tokenizer = tokenizer
        self.max_seq_len = 0  # updated to the longest tokenized sequence seen
        self.classes = classes

        # print(train[IntentDetectionData.DATA_COLUMN].str.len().sort_values().index())
        # Reorder both frames by sentence length (shortest first).
        train, test = map(lambda df: df.reindex(df[IntentDetectionData.DATA_COLUMN].str.len().sort_values().index),
                          [train, test])
        ((self.train_x, self.train_y), (self.test_x, self.test_y)) = map(self._prepare, [train, test])
        print("max seq_len", self.max_seq_len)
        # Cap the observed maximum at the caller-supplied limit before padding.
        self.max_seq_len = min(self.max_seq_len, max_seq_len)
        self.train_x, self.test_x = map(self._pad, [self.train_x, self.test_x])

    def _prepare(self, df):
        # Tokenize each sentence, wrap it in BERT's [CLS]/[SEP] markers and
        # map the polarity label to its index in self.classes.
        x, y = [], []
        for _, row in tqdm(df.iterrows()):
            text, label = row[IntentDetectionData.DATA_COLUMN], row[IntentDetectionData.LABEL_COLUMN]
            tokens = self.tokenizer.tokenize(text)
            tokens = ['[CLS]'] + tokens + ['[SEP]']
            token_ids = self.tokenizer.convert_tokens_to_ids(tokens)
            # Track the longest sequence so padding can size rows uniformly.
            self.max_seq_len = max(self.max_seq_len, len(token_ids))
            x.append(token_ids)
            y.append(self.classes.index(label))
        return np.array(x), np.array(y)

    def _pad(self, ids):
        # Truncate each row to max_seq_len - 2 and right-pad with zeros so
        # every row ends up exactly self.max_seq_len ids long.
        x = []
        for input_ids in ids:
            input_ids = input_ids[:min(len(input_ids), self.max_seq_len - 2)]
            input_ids = input_ids + [0] * (self.max_seq_len - len(input_ids))
            x.append(np.array(input_ids))
        return np.array(x)
tokenizer = FullTokenizer(vocab_file=os.path.join(bert_ckpt_dir, 'vocab.txt'))
t = tokenizer.tokenize('ಶುಭ ದಿನ')
print(t)
ds = tokenizer.convert_tokens_to_ids(t)
print(ds)
def create_model(max_seq_len, bert_ckpt_file):
    """Build a Keras classifier: BERT encoder + dropout + two dense layers.

    Reads the BERT config from the module-level `bert_config_file`, wires the
    layer graph for inputs of length `max_seq_len`, then loads the pretrained
    BERT weights from `bert_ckpt_file`.  The output size comes from the
    module-level `classes` list, which must be set before calling this.
    """
    with tf.io.gfile.GFile(bert_config_file, 'r') as reader:
        bc = StockBertConfig.from_json_string(reader.read())
        bert_params = map_stock_config_to_params(bc)
        # No adapter modules: the full BERT weights are fine-tuned.
        bert_params.adapter_size = None
        bert = BertModelLayer.from_params(bert_params, name='bert')

    input_ids = keras.layers.Input(shape=(max_seq_len,), dtype='int32', name='input_ids')
    bert_output = bert(input_ids)

    # Use the hidden state at position 0 ([CLS]) as the sequence representation.
    cls_out = keras.layers.Lambda(lambda seq: seq[:, 0, :])(bert_output)
    cls_out = keras.layers.Dropout(0.5)(cls_out)
    logits = keras.layers.Dense(units=768, activation='tanh')(cls_out)
    logits = keras.layers.Dropout(0.5)(logits)
    logits = keras.layers.Dense(units=len(classes), activation='softmax')(logits)

    model = keras.Model(inputs=input_ids, outputs=logits)
    model.build(input_shape=(None, max_seq_len))

    # Initialise the BERT layer from the pretrained checkpoint.
    load_stock_weights(bert, bert_ckpt_file)

    return model
classes = train.polarity.unique().tolist()
data = IntentDetectionData(train, test, tokenizer, classes, max_seq_len=128)
print(data.train_x.shape)
# Training:
model = create_model(data.max_seq_len, bert_ckpt_file)
print(model.summary())
model.compile(
optimizer=keras.optimizers.Adam(1e-5),
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[keras.metrics.SparseCategoricalAccuracy(name='acc')]
)
log_dir = 'log/intent_detection' + datetime.datetime.now().strftime("%Y%m%d-%H%M%s")
tensorboard_callback = keras.callbacks.TensorBoard(log_dir=log_dir)
history = model.fit(
x=data.train_x,
y=data.train_y,
validation_split=0.1,
batch_size=16,
shuffle=True,
epochs=5,
)
check_point_path = '/home/shravan/dissertation/bert_model'
tf.saved_model.save(model, check_point_path)
# model.save(check_point_path)
| [
"shravan007.c@gmail.com"
] | shravan007.c@gmail.com |
fd17bac5687007bfef66049ef53312bd0aee968b | 0725ed7ab6be91dfc0b16fef12a8871c08917465 | /graphs/prims_heapq.py | 8882c5d0bb2a707a4563a2ecdda177d65089d3bd | [] | no_license | siddhism/leetcode | 8cb194156893fd6e9681ef50c84f0355d09e9026 | 877933424e6d2c590d6ac53db18bee951a3d9de4 | refs/heads/master | 2023-03-28T08:14:12.927995 | 2021-03-24T10:46:20 | 2021-03-24T10:46:20 | 212,151,205 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 867 | py | from collections import defaultdict
import heapq
def create_spanning_tree(graph, starting_vertex):
    """Build a minimum spanning tree of `graph` with Prim's algorithm.

    `graph` maps each vertex to a {neighbour: weight} dict.  Returns a
    defaultdict(set) mapping each tree vertex to the children attached
    through it, starting from `starting_vertex`.
    """
    tree = defaultdict(set)
    seen = {starting_vertex}

    # Seed the frontier heap with every edge leaving the start vertex.
    frontier = [(weight, starting_vertex, neighbour)
                for neighbour, weight in graph[starting_vertex].items()]
    heapq.heapify(frontier)

    while frontier:
        weight, source, target = heapq.heappop(frontier)
        if target in seen:
            continue  # stale entry: target already joined the tree
        seen.add(target)
        tree[source].add(target)
        for nxt, w in graph[target].items():
            if nxt not in seen:
                heapq.heappush(frontier, (w, target, nxt))

    return tree
example_graph = {
'A': {'B': 2, 'C': 3},
'B': {'A': 2, 'C': 1, 'D': 1, 'E': 4},
'C': {'A': 3, 'B': 1, 'F': 5},
'D': {'B': 1, 'E': 1},
'E': {'B': 4, 'D': 1, 'F': 1},
'F': {'C': 5, 'E': 1, 'G': 1},
'G': {'F': 1},
}
| [
"siddhesh@hackerearth.com"
] | siddhesh@hackerearth.com |
990ac94bfab38143c21c6c8fe7fece6484ba3172 | b19c9fe62eaa309851dc11f6fd7a05bda463fb58 | /bigfish/apps/collection/admin.py | f3c73d16026c10bd38e07e8404f79a8c0c14d24d | [] | no_license | hyu9999/bigfish | 3ff3b025982e71bd6dd80f60ad6c70e735e98936 | 4189fdcacc20795a4778b53c9d47d6fdd3e71811 | refs/heads/master | 2022-07-08T13:55:12.908583 | 2019-03-22T09:36:12 | 2019-03-22T09:36:12 | 177,055,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | from django.contrib import admin
from bigfish.apps.collection.models import UserVoice, UserPhoto
from bigfish.utils.functions import format_admin_list
@admin.register(UserVoice)
class UserVoiceAdmin(admin.ModelAdmin):
list_display = format_admin_list(UserVoice)
@admin.register(UserPhoto)
class UserPhotoAdmin(admin.ModelAdmin):
list_display = format_admin_list(UserPhoto)
| [
"757147959@qq.com"
] | 757147959@qq.com |
0ed67d205313018188b78a8c3edc3641f2c5e5c0 | fc610db81d5cf434ecb348aff2e7b90ea65d2e39 | /tests/core/test_utils.py | 2cfcefb97a62635e28838f4915bb25256f957200 | [
"MIT"
] | permissive | hungphamvn/django-spectator | fd8971942b1cfe7fe3d3358f66291dbce3dedb44 | 32a3297d206f9a2cb58a28d1b895b468cfbf62df | refs/heads/master | 2020-05-26T00:09:44.035364 | 2019-05-05T18:41:30 | 2019-05-05T18:41:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,617 | py | from django.test import TestCase
from spectator.core.utils import chartify
from spectator.core.factories import IndividualCreatorFactory
class ChartifyTestCase(TestCase):
def setUp(self):
super().setUp()
self.creators = IndividualCreatorFactory.create_batch(5)
self.creators[0].num_readings = 10
self.creators[1].num_readings = 8
self.creators[2].num_readings = 8
self.creators[3].num_readings = 6
self.creators[4].num_readings = 0
def test_default_list(self):
chart = chartify(self.creators, 'num_readings')
self.assertEqual(len(chart), 4)
self.assertEqual(chart[0].chart_position, 1)
self.assertEqual(chart[1].chart_position, 2)
self.assertEqual(chart[2].chart_position, 2)
self.assertEqual(chart[3].chart_position, 4)
def test_cutoff_is_none(self):
"Should include the 0-scoring item."
chart = chartify(self.creators, 'num_readings', cutoff=None)
self.assertEqual(len(chart), 5)
self.assertEqual(chart[0].chart_position, 1)
self.assertEqual(chart[1].chart_position, 2)
self.assertEqual(chart[2].chart_position, 2)
self.assertEqual(chart[3].chart_position, 4)
self.assertEqual(chart[4].chart_position, 5)
def test_cutoff_value(self):
"Should be possible to set a custom cutoff value."
chart = chartify(self.creators, 'num_readings', cutoff=6)
self.assertEqual(len(chart), 3)
self.assertEqual(chart[0].chart_position, 1)
self.assertEqual(chart[1].chart_position, 2)
self.assertEqual(chart[2].chart_position, 2)
def test_ensure_chartiness(self):
"By default list should be empty if all objects have the same score."
creators = IndividualCreatorFactory.create_batch(3)
for c in creators:
c.num_readings = 10
chart = chartify(creators, 'num_readings')
self.assertEqual(len(chart), 0)
def test_ensure_chartiness_false(self):
"Should be possible to disable the behaviour."
creators = IndividualCreatorFactory.create_batch(3)
for c in creators:
c.num_readings = 10
chart = chartify(creators, 'num_readings', ensure_chartiness=False)
self.assertEqual(len(chart), 3)
def test_handle_empty_chart(self):
"There was an error if all items in chart met the cutoff value."
creator = IndividualCreatorFactory()
creator.num_readings = 1
chart = chartify([creator], 'num_readings', cutoff=1)
self.assertEqual(len(chart), 0)
| [
"phil@gyford.com"
] | phil@gyford.com |
cea0b060e4b40cedac41313e5ec52386551b8b1a | 3c0cfa2e88c8779c435ac161882acac5a9254816 | /virasana/analises/image_ratio.py | ff74291c55c6ee7bde5ca41ea3dc52d0cbcf12b5 | [] | no_license | IvanBrasilico/virasana | fc22191ecfcf97f5857027a73ce845e01bc8e8ca | 58954b7d36fe02f16b7f2f34190b43b84835effd | refs/heads/master | 2023-08-31T07:04:44.953661 | 2023-08-25T21:00:22 | 2023-08-25T21:00:22 | 120,934,545 | 0 | 3 | null | 2021-01-29T13:08:43 | 2018-02-09T17:21:43 | Jupyter Notebook | UTF-8 | Python | false | false | 1,207 | py | """"Análise do ratio de imagens por Recinto/Escâner.
Extrai e sumariza relação largura/altura de imagens agrupando por
por Recinto/Escâner para permitir a detecção de imagens que estão
sendo geradas com poucos pulsos de X-Ray/pouca informação e consequentemente
terão a qualidade prejudicada.
"""
import io
import sys
import time
from collections import defaultdict
sys.path.insert(0, '.')
sys.path.insert(0, '../ajna_docs/commons')
from virasana.db import mongodb as db
from ajna_commons.utils.images import mongo_image
from PIL import Image
def do():
    """Collect (width, height) of the first 100 JPEG scans, grouped by recinto.

    Queries GridFS metadata in MongoDB, loads each image body to read its
    pixel dimensions, prints the elapsed time and the per-recinto size lists.
    """
    print('Iniciando...')
    started = time.time()
    sizes_by_recinto = defaultdict(list)
    query = {'metadata.contentType': 'image/jpeg',
             'metadata.recinto': {'$exists': True}}
    projection = {'_id': 1, 'metadata.recinto': 1}
    # Sample capped at 100 documents to keep the scan cheap.
    for record in db.fs.files.find(query, projection).limit(100):
        raw_bytes = mongo_image(db, record['_id'])
        dimensions = Image.open(io.BytesIO(raw_bytes)).size
        sizes_by_recinto[record['metadata']['recinto']].append(dimensions)
    print('{:0.2f} segundos'.format(time.time() - started))
    print(sizes_by_recinto)


if __name__ == '__main__':
    do()
| [
"brasilico.ivan@gmail.com"
] | brasilico.ivan@gmail.com |
b9a580a0bcfb3b64cee62d98e9208fbe92e05f9a | 316b99c6046ff58c8499e0c214e9b81d9c3132b0 | /beartype/_util/text/utiltextprefix.py | 85f2da32d290220547f451da0925cc9a7f594490 | [
"MIT",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | beartype/beartype | fb6417b3dc2e08c065f0d907f43411c33d883a7d | 0cfd53391eb4de2f8297a4632aa5895b8d82a5b7 | refs/heads/main | 2023-08-15T13:17:47.095732 | 2023-08-15T05:25:54 | 2023-08-15T05:25:54 | 252,646,465 | 1,992 | 51 | MIT | 2023-07-28T04:13:08 | 2020-04-03T06:06:22 | Python | UTF-8 | Python | false | false | 4,726 | py | #!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2023 Beartype authors.
# See "LICENSE" for further details.
'''
Project-wide **text prefix utilities** (i.e., low-level callables creating and
returning human-readable strings describing prominent objects or types and
*always* suffixed by exactly one space character, intended to prefix
human-readable error messages).
This private submodule is *not* intended for importation by downstream callers.
'''
# ....................{ IMPORTS }....................
from beartype._data.func.datafuncarg import ARG_NAME_RETURN
from beartype._data.hint.datahinttyping import BeartypeableT
from beartype._util.text.utiltextlabel import (
label_callable,
label_type,
)
from collections.abc import Callable
# ....................{ PREFIXERS ~ beartypeable }....................
#FIXME: Unit test this function with respect to classes, please.
def prefix_beartypeable(
    obj: BeartypeableT, # pyright: ignore[reportInvalidTypeVarUse]
) -> str:
    '''
    Human-readable label describing the passed **beartypeable** (i.e., object
    that is currently being or has already been decorated by the
    :func:`beartype.beartype` decorator), suffixed by one space so it can be
    used directly as a message prefix.

    Parameters
    ----------
    obj : BeartypeableT
        Beartypeable to be labelled.

    Returns
    ----------
    str
        Human-readable label describing this beartypeable.
    '''

    # Classes and callables are labelled by different helpers; everything
    # passed here is guaranteed to be one or the other.
    if isinstance(obj, type):
        return f'{label_type(obj)} '
    return f'{label_callable(obj)} ' # type: ignore[arg-type]
# ....................{ PREFIXERS ~ beartypeable : pith }....................
def prefix_beartypeable_pith(func: Callable, pith_name: str) -> str:
    '''
    Human-readable label describing either the parameter with the passed name
    *or*, if this name is ``return``, the return value of the passed decorated
    callable, suffixed by delimiting whitespace.

    Parameters
    ----------
    func : Callable
        Decorated callable to be labelled.
    pith_name : str
        Name of the parameter or return value of this callable to be labelled.

    Returns
    ----------
    str
        Human-readable label describing either this parameter *or* this
        return value.
    '''
    assert isinstance(pith_name, str), f'{repr(pith_name)} not string.'

    # The sentinel name "return" selects the return-value label; any other
    # name is treated as a parameter name.
    if pith_name == ARG_NAME_RETURN:
        return prefix_beartypeable_return(func)
    return prefix_beartypeable_arg(func=func, arg_name=pith_name)
def prefix_beartypeable_arg(func: Callable, arg_name: str) -> str:
    '''
    Human-readable label describing the parameter with the passed name of the
    passed decorated callable, suffixed by delimiting whitespace.

    Parameters
    ----------
    func : Callable
        Decorated callable to be labelled.
    arg_name : str
        Name of the parameter of this callable to be labelled.

    Returns
    ----------
    str
        Human-readable label describing this parameter's name.
    '''
    assert isinstance(arg_name, str), f'{repr(arg_name)} not string.'

    # Compose the callable's own label with the parameter clause.
    callable_label = prefix_beartypeable(func)
    return f'{callable_label}parameter "{arg_name}" '
def prefix_beartypeable_return(func: Callable) -> str:
    '''
    Human-readable label describing the return of the passed decorated
    callable, suffixed by delimiting whitespace.

    Parameters
    ----------
    func : Callable
        Decorated callable to be labelled.

    Returns
    ----------
    str
        Human-readable label describing this return.
    '''

    # Compose the callable's own label with the return clause.
    return prefix_beartypeable(func) + 'return '
| [
"leycec@gmail.com"
] | leycec@gmail.com |
b51ab4341c846ab28a3ed1eb491f3abae5fb8ba9 | 3b60e6f4bbc011003ac4929f01eb7409918deb79 | /Analysis_v1/ANGenStudy/plots/Plot.py | d1505841e50b3add2dd72d925c3fec7db7ba56c8 | [] | no_license | uzzielperez/Analyses | d1a64a4e8730325c94e2bc8461544837be8a179d | 1d66fa94763d7847011ea551ee872936c4c401be | refs/heads/master | 2023-02-09T04:54:01.854209 | 2020-09-07T14:57:54 | 2020-09-07T14:57:54 | 120,850,137 | 0 | 0 | null | 2020-06-17T16:48:16 | 2018-02-09T03:14:04 | C++ | UTF-8 | Python | false | false | 4,425 | py | #!/usr/bin/python
import ROOT
from ROOT import TClass,TKey, TIter,TCanvas, TPad,TFile, TPaveText, TColor, TGaxis, TH1F, TPad, TH1D, TLegend
#from ROOT import kBlack, kBlue, kRed, kGreen, kMagenta, kCyan, kOrange, kViolet, kSpring
from ROOT import kBlue, kOrange, kCyan, kRed, kMagenta, kGreen, kViolet, kSpring, kPink, kAzure
from ROOT import gBenchmark, gStyle, gROOT, gDirectory
#from legend import *
#from plotsHelpercomp import *
import re
import sys
CMSlumiPath = '/uscms_data/d3/cuperez/CMSSW_8_0_25/src/scripts/pyroot'
sys.path.append(CMSlumiPath)
from CMSlumi import CMS_lumi, set_CMS_lumi
import argparse
sw = ROOT.TStopwatch()
sw.Start()
LambdaT = "ALL"
SMPythia8 = True
SM = False
ADD = True
tag = "b"
zoom = False
#drawstyle = "hist, same"
drawstyle = "same"
intlumi = 130
BKG = []
path = "/uscms_data/d3/cuperez/CMSSW_8_0_25/src/scripts/Analysis_v1/UnparticlesSplitStudy"
BKG.append("%s/Unparticles_SM_M_500-2000.root" %(path))
BKG.append("%s/Unparticles_SM_M-2000.root" %(path))
BKG.append("../processed/GGJetsAN_M-1000.root")
DATASET = []
DATASET.append("../MkClassScripts/OUTTestSTest1p1Unp1500p0_spin-0_M_500-2000_py_GEN.root")
DATASET.append("../MkClassScripts/OUTTestSTest1p1Unp1500p0_spin-2_M_500-2000_py_GEN.root")
DATASET.append("../MkClassScripts/OUTTestSTest1p1Unp2500p0_spin-0_M_500-2000_py_GEN.root")
DATASET.append("../MkClassScripts/OUTTestSTest1p1Unp2500p0_spin-2_M_500-2000_py_GEN.root")
DATASET.append("../MkClassScripts/OUTTestSTest1p1Unp4000p0_spin-0_M_500-2000_py_GEN.root")
DATASET.append("../MkClassScripts/OUTTestSTest1p1Unp4000p0_spin-2_M_2000_py_GEN.root")
#
# Draw Options
DrawAsHi = False
gStyle.SetOptStat(0)
bkgf = []
for fi in BKG:
bkgf.append(ROOT.TFile(fi, "READ"))
uf = []
for datafile in DATASET:
uf.append(ROOT.TFile(datafile, "READ"))
canvas = ROOT.TCanvas()
canvas.SetLogy()
obj = "gendiphotonMinv"
uh = []
bkgh = []
for ofile in bkgf:
bkgh.append(ofile.Get(obj))
for openfile in uf:
uh.append(openfile.Get(obj))
xtitle = r"m_{#gamma#gamma}#scale[1.0]{(GeV)}"
ytitle = r"#scale[1.0]{Nevents}"
xmin, xmax = 500, 13000
if zoom:
xmin, xmax = 1000, 2500
x_range = "%s-%s" %(str(xmin), str(xmax))
xpos1, ypos1, xpos2, ypos2 = .55, 0.58, .85, .88
leg = TLegend(xpos1, ypos1, xpos2, ypos2)
leg.SetBorderSize(0)
leg.SetFillColor(0)
leg.SetFillStyle(0)
leg.SetTextFont(42)
leg.SetTextSize(0.035)
if SMPythia8:
tag = tag + "SM"
histSM = bkgh[0].Clone("histSM")
histSM.Add(bkgh[1], 1.0)
histSM.SetFillStyle(3144)
histSM.SetFillColor(7)
histSM.Scale(intlumi)
histSM.Draw("hist")
label = "SM"
leg.AddEntry(histSM, "%s" %(label), "f")
print "Drawn", label
if SM:
tag = tag + "SM"
histSM = bkgh[3].Clone("histSM")
#histSM.Add(bkgh[1], 1.0)
histSM.SetFillStyle(3144)
histSM.SetFillColor(7)
histSM.Scale(intlumi)
#histSM.Draw("hist")
label = "SM"
leg.AddEntry(histSM, "%s" %(label), "f")
print "Drawn", label
colorlist = [kBlue, kOrange, kCyan, kRed, kMagenta, kGreen, kViolet, kSpring, kPink, kAzure, kOrange+8, kGreen+8, kRed+8, kViolet+8, kMagenta+5]
labels = []
histClones = []
iset = 0
icolor = 0
i = 0
while iset < len(DATASET):
pattern = "TestADD_NI-1_([^(]*)_M-1000.root"
label = re.findall(pattern, DATASET[iset])
labels.append(label[0])
tag = tag + label[0]
#histClone.delete
iset = iset + 1
while i < len(DATASET):
histClone = uh[i].Clone("histdu%s" %(labels[i]))
#histClone.Add(uh[i+1], 1.0)
histClones.append(histClone)
i = i + 1
j = 0
for histclone in histClones:
histclone.SetLineColor(colorlist[icolor])
histclone.Scale(intlumi)
histclone.Draw(drawstyle)
print labels[j]
leglabel = r"d#Lambda_{T} = %s" %(labels[j])
leg.AddEntry(histclone, "%s" %(leglabel), "l")
j = j+1
icolor = icolor + 1
#iclone = 0
#while iclone < len(histClones):
# histClones[iclone].Add(uh[iclone+1], 1.0)
# iclone.SetLineColor(colorlist[icolor])
# iclone.Scale(intlumi)
# iclone.Draw(drawstyle)
# leglabel = r"du = %s, #Lambda_{U} = %s" %(label)
# leg.AddEntry(histClone, "%s" %(leglabel), "l")
# histClone.delete
#
# icolor = icolor + 1
print tag
histSM.GetYaxis().SetTitle(ytitle)
histSM.GetYaxis().SetTitleOffset(1.0)
histSM.GetXaxis().SetTitle(xtitle)
histSM.GetXaxis().SetRangeUser(xmin, xmax)
leg.Draw()
set_CMS_lumi(canvas, 4, 11, intlumi)
canvas.Update()
canvas.Draw()
canvas.Print("LOG%s_SMvsADD_%sfb-1_%s_%s.pdf" %(intlumi, LambdaT, obj,tag))
| [
"uzzie.perez@cern.ch"
] | uzzie.perez@cern.ch |
a60d525dcf91219404c259df8956699c19f69cff | bfd41fc543f6dbfc821341522cf8e7a9d2e34ce8 | /venv/bin/xhtml2pdf | de9af05ac590655a7ef64f3ce643a5f9be0576b6 | [] | no_license | MaraKovalcik/Flask | 783243560ead637a381f76d3893da2b212eff898 | 1ff8413f3551b051f8e6c76db6cf402fc7428188 | refs/heads/master | 2021-01-22T09:09:16.165734 | 2015-02-24T16:57:14 | 2015-02-24T16:57:14 | 31,268,626 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 363 | #!/home/mara/Dokumenty/PycharmProjects/flask-skeleton/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'xhtml2pdf==0.0.6','console_scripts','xhtml2pdf'
__requires__ = 'xhtml2pdf==0.0.6'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('xhtml2pdf==0.0.6', 'console_scripts', 'xhtml2pdf')()
)
| [
"mara.kovalcik@gmail.com"
] | mara.kovalcik@gmail.com | |
d57a1672ca8cc1d7d86cde2c960bab5c824c62a4 | 77311ad9622a7d8b88707d7cee3f44de7c8860cb | /res/scripts/client/gui/scaleform/daapi/view/meta/customizationfilterspopovermeta.py | 5b2d5a5854daef6458e0c2117852d24bd8aa65ae | [] | no_license | webiumsk/WOT-0.9.14-CT | 9b193191505a4560df4e872e022eebf59308057e | cfe0b03e511d02c36ce185f308eb48f13ecc05ca | refs/heads/master | 2021-01-10T02:14:10.830715 | 2016-02-14T11:59:59 | 2016-02-14T11:59:59 | 51,606,676 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 1,264 | py | # 2016.02.14 12:40:16 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/meta/CustomizationFiltersPopoverMeta.py
from gui.Scaleform.daapi.view.lobby.popover.SmartPopOverView import SmartPopOverView
class CustomizationFiltersPopoverMeta(SmartPopOverView):
    """DAAPI meta bridge for the customization-filters popover Flash view.

    Plain methods are Flash -> Python callbacks that concrete subclasses
    must override; the ``as_*S`` wrappers forward Python -> Flash calls,
    guarded so they only fire once the Flash object is DAAPI-initialised.
    """
    def changeFilter(self, groupId, itemId):
        # Flash callback: subclass must override (logs an error otherwise).
        self._printOverrideError('changeFilter')
    def setDefaultFilter(self):
        # Flash callback: subclass must override (logs an error otherwise).
        self._printOverrideError('setDefaultFilter')
    def as_setInitDataS(self, data):
        # Push initial filter data to the Flash view.
        if self._isDAAPIInited():
            return self.flashObject.as_setInitData(data)
    def as_setStateS(self, data):
        # Push the current filter state to the Flash view.
        if self._isDAAPIInited():
            return self.flashObject.as_setState(data)
    def as_enableDefBtnS(self, value):
        # Toggle the "reset to defaults" button.
        if self._isDAAPIInited():
            return self.flashObject.as_enableDefBtn(value)
    def as_enableGroupFilterS(self, value):
        # Toggle the group filter control.
        if self._isDAAPIInited():
            return self.flashObject.as_enableGroupFilter(value)
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\scaleform\daapi\view\meta\customizationfilterspopovermeta.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.02.14 12:40:16 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
7e439da3b100e153179f8cab56a740cc09a6d15e | f15ba8cdc7074692acadb5aa40f0647e27901c0f | /backend/driver/migrations/0001_initial.py | 24750369ddd51e15f0a9cf127bd27a98dd62bb67 | [] | no_license | crowdbotics-apps/mexican-18491 | 5f964019aa1d6a1848ff9e9ca4ddff533ef6ceaa | dffd7c9f02ebca9439bb26ee19ec6fdd689b4c94 | refs/heads/master | 2022-11-11T09:17:11.959916 | 2020-06-29T19:23:08 | 2020-06-29T19:23:08 | 275,906,006 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,674 | py | # Generated by Django 2.2.13 on 2020-06-29 19:22
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial schema for the driver app: DriverProfile (1:1 with the auth
    # user) and DriverOrder (links a driver to a delivery order).
    # Auto-generated by Django makemigrations; avoid hand-editing.
    initial = True
    dependencies = [
        ('delivery_order', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='DriverProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('photo', models.URLField()),
                ('timestamp_created', models.DateTimeField(auto_now_add=True)),
                ('last_updated', models.DateTimeField(auto_now=True)),
                ('details', models.TextField(blank=True, null=True)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='driverprofile_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='DriverOrder',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('timestamp_created', models.DateTimeField(auto_now_add=True)),
                # SET_NULL keeps the order row if the driver profile is deleted.
                ('driver', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='driverorder_driver', to='driver.DriverProfile')),
                ('order', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='driverorder_order', to='delivery_order.Order')),
            ],
        ),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
5e8515ccebe8461c731142db75dd9aa8e3c753fe | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/detection/s3fd_for_PyTorch/data/factory.py | d27ba99ebd8bf87a32b5f964e2ba145d9e67fea1 | [
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 2,486 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from .widerface import WIDERDetection
from .config import cfg
import torch
def dataset_factory(dataset):
    """Build the train/validation dataset pair for the given dataset name.

    Args:
        dataset: dataset identifier; only ``'face'`` (WIDER FACE) is
            supported.

    Returns:
        ``(train_dataset, val_dataset)`` tuple of WIDERDetection instances.

    Raises:
        ValueError: if *dataset* is not a supported name. (The original
            code fell through to the return statement and crashed with an
            opaque UnboundLocalError instead.)
    """
    if dataset != 'face':
        raise ValueError('unsupported dataset: {!r}'.format(dataset))
    train_dataset = WIDERDetection(cfg.FACE.TRAIN_FILE, mode='train')
    val_dataset = WIDERDetection(cfg.FACE.VAL_FILE, mode='val')
    return train_dataset, val_dataset
def detection_collate(batch):
    """Custom collate fn for batches whose images carry a variable number of
    object annotations (bounding boxes).

    Arguments:
        batch: (tuple) A tuple of tensor images and lists of annotations

    Return:
        A tuple containing:
            1) (tensor) batch of images stacked on their 0 dim
            2) (list of tensors) per-image annotation tensors (not stacked,
               since each image may have a different number of boxes)
    """
    images = [sample[0] for sample in batch]
    boxes = [torch.FloatTensor(sample[1]) for sample in batch]
    return torch.stack(images, 0), boxes
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
30d87a4b7451160fb6db181e5d913dcfaad1dbb9 | 0dca69b51629f06e66b2d3263f287cccf2f08fc4 | /src/dungeonbot/models/highlights.py | 30494bc281d3f8a0b98029876bb57dcfdc5917aa | [
"MIT"
] | permissive | DungeonBot/dungeonbot | c30bb433a25b8a69b346e9b900674d64b5ddace5 | 715c14d3a06d8a7a8771572371b67cc87c7e17fb | refs/heads/master | 2021-06-22T18:34:05.270805 | 2017-04-03T22:40:56 | 2017-04-04T02:26:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,008 | py | from dungeonbot.models import db
from datetime import datetime
class HighlightModel(db.Model):
    """Model for campaign highlights.

    Columns:
        id: integer primary key.
        text: highlight text, up to 256 characters.
        created: UTC creation timestamp, set per-row at insert time.
    """
    __table_args__ = {"extend_existing": True}
    id = db.Column(db.Integer, primary_key=True)
    text = db.Column(db.String(256))
    # BUG FIX: the default must be the *callable* datetime.utcnow, not the
    # call datetime.utcnow() -- calling it evaluates once at import time, so
    # every row would share the module-load timestamp.
    created = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)

    @classmethod
    def new(cls, args=None, session=None):
        """Create, persist and return a new Highlight.

        Returns None (and stores nothing) when *args* is empty/falsy.
        """
        if not args:
            return
        if session is None:
            session = db.session
        instance = cls(text=args)
        session.add(instance)
        session.commit()
        return instance

    @classmethod
    def list(cls, how_many=10, session=None):
        """Retrieve stored highlights, newest first, limited by 'how_many'."""
        if session is None:
            session = db.session
        # Use an explicit column expression: textual order_by strings such
        # as 'created desc' are deprecated/removed in modern SQLAlchemy.
        return (
            session.query(cls).
            order_by(cls.created.desc()).
            limit(how_many).
            all()
        )
| [
"tanner.lake@gmail.com"
] | tanner.lake@gmail.com |
0f4f218d5415925d9816306417dabc6360e29595 | 4c45f5bbd42bda74acb304e9f178bda93df01f8b | /simple_password.py | 073228b41cfc962ca4ce9d1d55ef3d0b57bb9572 | [] | no_license | bpavankumar5656/coderbyte | d39bd6657a810758489eebc4f025e82bdbbc7bf1 | 7455ce6d9a92b46eb21dcd5d353c48da9f4e5455 | refs/heads/master | 2023-03-17T22:47:16.867478 | 2019-07-22T01:45:15 | 2019-07-22T01:45:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,286 | py | """
Have the function SimplePassword(str) take the str parameter being passed and determine
if it passes as a valid password that follows the list of constraints:
1. It must have a capital letter.
2. It must contain at least one number.
3. It must contain a punctuation mark.
4. It cannot have the word "password" in the string.
5. It must be longer than 7 characters and shorter than 31 characters.
If all the above constraints are met within the string, the your program should return the string true,
otherwise your program should return the string false. For example: if str is "apple!M7" then your program should return "true".
Use the Parameter Testing feature in the box below to test your code with different arguments.
"""
import re
def SimplePassword(str):  # parameter name kept for caller compatibility
    """Return "true" if *str* is a valid password, else "false".

    Constraints: at least one capital letter, at least one digit, at least
    one punctuation mark (non-word character), must not contain the word
    "password" (any case), and its length must be greater than 7 and less
    than 31 characters.
    """
    if 'password' in str.lower():
        return "false"
    # BUG FIX: length must satisfy 7 < len < 31. The original tested
    # `len < 7 or len > 31`, wrongly accepting 7- and 31-character inputs.
    if len(str) <= 7 or len(str) >= 31:
        return "false"
    upper = any(char.isupper() for char in str)
    number = any(char.isdigit() for char in str)
    mark = any(re.match(r'\W', char) for char in str)
    return "true" if (upper and number and mark) else "false"
# keep this function call here
print (SimplePassword(raw_input()))
| [
"luismiguel.mopa@gmail.com"
] | luismiguel.mopa@gmail.com |
b04cfcf235cf8701e45fd7b18703a018bd407ec4 | be12916ec075c76469946f6da2cdde4857141702 | /771_3.py | 03c1bf738765f51107f25690d14671aafa1a7d09 | [] | no_license | goodsosbva/algorithm | 7b2f9b1a899e43c7d120ab9d40e672be336a23d9 | 0bc44e9466d2d32a4b4e126e073badc60172da6e | refs/heads/main | 2023-08-06T19:34:40.630006 | 2021-09-20T10:34:41 | 2021-09-20T10:34:41 | 334,918,056 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | def numJewelsInStones(J: str, S: str) -> int:
return sum(s in J for s in S)
# Quick manual check: stones 'a','A','A','a','a' are jewels, so expect 5.
jew = "aA"
stone = "aAAbbbbaa"
jcount = numJewelsInStones(jew, stone)
print(jcount)
"noreply@github.com"
] | goodsosbva.noreply@github.com |
66abb4d2ef1586a2f3a282777ffe748d4596aa7c | a951bcce35dfa63db7a812bd27c1863f286e37cf | /tests/testflows/ldap/role_mapping/regression.py | a2c70d8bd4149a454b754f8058e1b2d2bafdf73a | [
"Apache-2.0"
] | permissive | nikitamikhaylov/ClickHouse | 294be1c43cbb0e6100145ce4cc5d3fb1191c0de2 | 88629657ca54f92c7fe1bf3f055e3389668ded3c | refs/heads/master | 2022-03-02T09:35:26.300566 | 2022-01-27T10:09:17 | 2022-01-27T10:09:17 | 197,409,528 | 1 | 3 | Apache-2.0 | 2021-11-10T16:03:27 | 2019-07-17T14:50:45 | C++ | UTF-8 | Python | false | false | 1,576 | py | #!/usr/bin/env python3
import os
import sys
from testflows.core import *
append_path(sys.path, "..", "..")
from helpers.cluster import Cluster
from helpers.argparser import argparser
from ldap.role_mapping.requirements import *
# Cross-outs of known fails
# Cross-outs of known fails: maps test-name patterns to expected outcomes so
# these known-buggy scenarios are reported as XFail instead of Fail.
xfails = {
    "mapping/roles removed and added in parallel":
        [(Fail, "known bug")],
    "user dn detection/mapping/roles removed and added in parallel":
        [(Fail, "known bug")]
}

@TestFeature
@Name("role mapping")
@ArgumentParser(argparser)
@Specifications(
    SRS_014_ClickHouse_LDAP_Role_Mapping
)
@Requirements(
    RQ_SRS_014_LDAP_RoleMapping("1.0")
)
@XFails(xfails)
def regression(self, local, clickhouse_binary_path, stress=None, parallel=None):
    """ClickHouse LDAP role mapping regression module.

    Brings up the 3-node ClickHouse + LDAP docker-compose cluster, stores it
    on the testflows context, then runs the sanity scenario followed by the
    server-config and mapping feature suites.
    """
    nodes = {
        "clickhouse": ("clickhouse1", "clickhouse2", "clickhouse3"),
    }

    # Optional stress/parallel knobs propagate to child tests via context.
    if stress is not None:
        self.context.stress = stress
    if parallel is not None:
        self.context.parallel = parallel

    with Cluster(local, clickhouse_binary_path, nodes=nodes,
            docker_compose_project_dir=os.path.join(current_dir(), "ldap_role_mapping_env")) as cluster:
        self.context.cluster = cluster

        Scenario(run=load("ldap.authentication.tests.sanity", "scenario"), name="ldap sanity")
        Feature(run=load("ldap.role_mapping.tests.server_config", "feature"))
        Feature(run=load("ldap.role_mapping.tests.mapping", "feature"))
        #Feature(run=load("ldap.role_mapping.tests.user_dn_detection", "feature"))

# testflows entry point: main() is truthy when run as the top-level module.
if main():
    regression()
| [
"vzakaznikov@protonmail.com"
] | vzakaznikov@protonmail.com |
da59520dd091280c17c50e446feb08a2e5b97db6 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/pygame/pygameweb/pygameweb/project/models.py | b6b610b87e3c83391214fcd286c9756c9b1a504a | [
"BSD-2-Clause"
] | permissive | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:30636a4eef22c0481f5af61e690f051065e2a438fd56ed9ba2af78c5d3611faa
size 6807
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
1c7a659445f9609b12a3b52e85abebe758640fb3 | 129d0272314aae880c88280008b4ce01049e34ef | /randori/socialnet/models/old/revision.py | 9db6fc56aeff436c81a527005cfa4757017a1713 | [] | no_license | fstakem/Amphora | 4eacab3c3af33dd8e3dd0121f3d05dce81abeddb | f4164e583db18e8947a5f5f79ca13d861c351694 | refs/heads/master | 2021-01-15T20:38:11.169657 | 2014-02-05T14:31:31 | 2014-02-05T14:31:31 | 13,034,852 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,054 | py | # +++---+++---+++---+++---+++---+++---+++---+++---+++---+++---+++---+++---+++---+++---+++---+++
#
# File: revision.py
# By: Fred Stakem
# For: Private Research
# Date: 12.5.13
#
# +++---+++---+++---+++---+++---+++---+++---+++---+++---+++---+++---+++---+++---+++---+++---+++
# Librarys
from django.db import models
# App imports
# Main
class Revision(models.Model):
    """A revision of a project, wrapping a Version and delegating naming to it."""

    class Meta():
        app_label = 'socialnet'

    # Attributes
    # Relationships
    # Add later tree
    #    -> parent
    #    -> children
    # Add later
    #    -> owner
    version = models.ForeignKey('Version', related_name='in_revision')
    project = models.ForeignKey('Project', blank=True, null=True, related_name='previous_revision')
    software_stack = models.ManyToManyField('SoftwareStack', blank=True, null=True, related_name='used_for_revision')

    def __unicode__(self):
        # Display name delegates entirely to the linked Version.
        return self.version.__unicode__()

    def name(self):
        # Delegates to Version.name().
        return self.version.name()

    def dotName(self):
        # Delegates to Version.dotName().
        return self.version.dotName()
"fstakem@gmail.com"
] | fstakem@gmail.com |
8e480d623a40f3dba37d571a29668dc0047c4345 | b16d94254ad16565e1d197e74fa2c24d9a8506ba | /src-distributed-qanda-alt/utils.py | 247ccbaa085453fad197bfc5128122ee28d35066 | [
"LGPL-2.0-or-later",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"CC0-1.0",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"Python-2.0",
"MIT"
] | permissive | data-science-on-aws/data-science-on-aws | 438455319f05e18e9d154777a474db26cd73005f | 2e2405f2968c454065447a0ef9aa1dcc2c05b477 | refs/heads/generative | 2023-07-22T16:55:38.372524 | 2023-05-03T03:31:04 | 2023-05-03T03:31:04 | 244,029,618 | 687 | 231 | Apache-2.0 | 2023-04-12T17:01:11 | 2020-02-29T19:33:58 | Jupyter Notebook | UTF-8 | Python | false | false | 5,245 | py |
import argparse
from contextlib import contextmanager
import torch
import os
import json
from transformers import (
MODEL_MAPPING,
SchedulerType
)
# Distributed training helper methods.
def wait_for_everyone():
    """Block until every rank in the default torch.distributed group reaches
    this point (requires an initialised process group)."""
    #deepspeed.comm.barrier
    torch.distributed.barrier()
def is_main_process(rank):
    """Return True when *rank* is the global main process (rank 0)."""
    # Direct comparison replaces the original if/else returning literal
    # True/False; behavior is identical for all integer ranks.
    return rank == 0
def _goes_first(is_main):
    """Generator backing main_process_first(): non-main ranks block on a
    barrier before yielding, so the main rank runs the guarded section
    first; the main rank then hits the barrier after its turn, releasing
    the others."""
    if not is_main:
        wait_for_everyone()
    yield
    if is_main:
        wait_for_everyone()
@contextmanager
def main_process_first(rank):
    """
    Lets the main process go first inside a with block.

    The other processes will enter the with block after the main process
    exits. Implemented with two torch.distributed barriers (see
    _goes_first), so every rank must execute the with block.
    """
    yield from _goes_first(is_main_process(rank))
def is_local_main_process(local_rank):
    """Return True when *local_rank* is the main process on this node (rank 0)."""
    return local_rank == 0
# args parsing
MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def parse_args():
    """Parse known command-line arguments for FLAN-T5 seq2seq fine-tuning.

    Returns:
        argparse.Namespace with the recognized options. Unknown arguments are
        tolerated (parse_known_args), so extra launcher flags don't crash.

    Raises:
        ValueError: if neither --train_file nor --validation_file is given.
        AssertionError: if a provided data file is not .csv/.json.
    """
    parser = argparse.ArgumentParser(description="Finetune a FLAN T5 model on a Seq2Seq task")
    parser.add_argument(
        "--train_file", type=str, default=None, help="A csv or a json file containing the training data."
    )
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--tokenizer_name",
        type=str,
        default=None,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument(
        "--use_slow_tokenizer",
        action="store_true",
        help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).",
    )
    parser.add_argument(
        "--per_device_train_batch_size",
        type=int,
        default=1,
        help="Batch size (per device) for the training dataloader.",
    )
    parser.add_argument(
        "--per_device_eval_batch_size",
        type=int,
        default=1,
        help="Batch size (per device) for the evaluation dataloader.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=5e-5,
        help="Initial learning rate (after the potential warmup period) to use.",
    )
    parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.")
    parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.")
    parser.add_argument(
        "--max_train_steps",
        type=int,
        default=500,
        help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument(
        "--lr_scheduler_type",
        type=SchedulerType,
        default="linear",
        help="The scheduler type to use.",
        choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"],
    )
    parser.add_argument(
        "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler."
    )
    parser.add_argument("--seed", type=int, default=100, help="A seed for reproducible training.")
    parser.add_argument(
        "--block_size",
        type=int,
        default=None,
        help=(
            "Optional input sequence length after tokenization. The training dataset will be truncated in block of"
            " this size for training. Default to the model max input length for single sentence inputs (take into"
            " account special tokens)."
        ),
    )
    parser.add_argument(
        "--preprocessing_num_workers",
        type=int,
        default=None,
        help="The number of processes to use for the preprocessing.",
    )
    # NOTE(review): any value passed here (even the string "False") is truthy;
    # consider action="store_true" if callers never pass an explicit value.
    parser.add_argument("--group_texts", default=False, help="Whether to group texts together when tokenizing")
    # nargs="?" makes these positionals optional so their defaults actually
    # apply (argparse ignores `default` on required positionals). Supplying
    # them positionally still works exactly as before.
    parser.add_argument("checkpoint_dir", type=str, nargs="?", default="/opt/ml/checkpoints")
    parser.add_argument("model_dir", type=str, nargs="?", default="/opt/ml/model")

    args, _ = parser.parse_known_args()

    # Sanity checks
    if args.train_file is None and args.validation_file is None:
        raise ValueError("Need training/validation file.")
    else:
        if args.train_file is not None:
            extension = args.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or json file."
        if args.validation_file is not None:
            extension = args.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or json file."
    return args
"chris@fregly.com"
] | chris@fregly.com |
87163c6d533958ad48dd7f695826f2d527faa969 | e7ba8d27898fbd4fe396da19f82ab9317b3518cf | /goods/urls.py | 8074753fd953bf5de7316547db5db75f237ac077 | [] | no_license | LYblogs/fresh_shop | 8c135d9f7c59800c1409375854e1941f24d5c60e | 2eafcb355d049f47de2c45c68e733bf2759706b9 | refs/heads/master | 2020-04-17T18:00:19.775074 | 2019-04-04T04:10:18 | 2019-04-04T04:10:18 | 166,807,683 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 340 | py | from django.urls import path
from goods import views
urlpatterns = [
    path('index/', views.index, name='index'),
    path('detail/<int:id>/', views.detail, name='detail'),
    # "view more" listing for a category
    path('my_list/<int:id>/', views.my_list, name='my_list'),
    # search
    path('goods_search/', views.goods_search, name='goods_search')
]
| [
"2271032145@qq.com"
] | 2271032145@qq.com |
522e61f62558044453ae1ab3f13b44f3bc194901 | 94e7c790d17ba08e8a2a74077dd8b75e7ac120b0 | /chapter04/Exercise21_04.py | b3ce562574e71019474672c8bd23fea44b4b7cce | [] | no_license | lutfar9427/Exercises_Solution_of_INTRODUCTION_TO_PROGRAMMING_USING_Python | 9632e515428685dcaa7d057cf52f0e191e9f7ae0 | d037475316e6c6b7c6a7a7023318ef4ab4ed3f8d | refs/heads/master | 2020-09-02T09:04:44.990668 | 2018-10-20T00:50:12 | 2018-10-20T00:50:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,596 | py | '''
**4.21 (Science: day of the week) Zeller’s congruence is an algorithm developed by
Christian Zeller to calculate the day of the week. The formula is
where
■ h is the day of the week (0: Saturday, 1: Sunday, 2: Monday, 3: Tuesday,
4: Wednesday, 5: Thursday, 6: Friday).
■ q is the day of the month.
■ m is the month (3: March, 4: April, ..., 12: December). January and February are
counted as months 13 and 14 of the previous year.
■ j is the century (i.e., ).
■ k is the year of the century (i.e., year % 100).
Write a program that prompts the user to enter a year, month, and day of the
month, and then it displays the name of the day of the week.
/**
* @author BASSAM FARAMAWI
* @email tiodaronzi3@yahoo.com
* @since 2018
*/
'''
# Ask for the year
# NOTE(review): eval() on raw input executes arbitrary code; int(input(...))
# would be safer, kept here to preserve the textbook's original interface.
year = eval(input("Enter year: (e.g., 2012): "))
# Ask for the month
m = eval(input("Enter month: 1-12: "))
# Zeller's congruence counts January and February as months 13 and 14
# of the previous year.
if m == 1:
    m = 13
    year = year - 1
elif m == 2:
    m = 14
    year = year - 1
# Ask for the day of the month
q = eval(input("Enter the day of the month: 1-31: "))
j = year // 100  # the century
k = year % 100   # the year of the century
# Zeller's congruence. Every division must be a *floor* division: the
# original used float '/' and could land on the wrong weekday once the
# fractional parts of the terms summed past 1 before the mod.
h = (q + (26 * (m + 1)) // 10 + k + k // 4 + j // 4 + 5 * j) % 7
# h maps 0=Saturday .. 6=Friday; a lookup replaces the original elif chain.
DAYS = ("Saturday", "Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday")
print("Day of the week is", DAYS[h])
| [
"tiodaronzi3@yahoo.com"
] | tiodaronzi3@yahoo.com |
a10b2a412fcd9de3437f75d4632df9421ff257d4 | ba2717fd81a20e7479ea710153a3a9cea5fcbed5 | /societegeneralelu/spiders/spider.py | 0e1468bde2a01c9e5b3ec953701f6ae38f5f08b8 | [] | no_license | hristo-grudev/societegeneralelu | 52d30f1a942945539ce29d7eee952f69d476fef9 | 166ad7ae8425ffbed5fee2f7d75c7a1dceb0a469 | refs/heads/main | 2023-03-25T16:18:04.997663 | 2021-03-23T09:13:25 | 2021-03-23T09:13:25 | 350,647,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,946 | py | import scrapy
from scrapy.loader import ItemLoader
from ..items import SocietegeneraleluItem
from itemloaders.processors import TakeFirst
import requests
url = "https://www.societegenerale.lu/fr/societe-generale-luxembourg/communiques-presse-actualites/"
base_payload = "tx_bisgsummary_pi2%5Bpage%5D={}&tx_bisgsummary_pi2%5Btext%5D=&tx_bisgsummary_pi2%5Byear%5D=0&tx_bisgsummary_pi2%5BremoveWrapperToListing%5D=true&no_cache=true&tx_bisgsummary_pi2%5BajaxCall%5D=true&tx_bisgsummary_pi2%5BajaxMethod%5D=refreshResults&tx_bisgsummary_pi2%5BforceConf%5D=&tx_bisgsummary_pi2%5BidContent%5D=13134"
headers = {
'authority': 'www.societegenerale.lu',
'pragma': 'no-cache',
'cache-control': 'no-cache',
'sec-ch-ua': '"Google Chrome";v="89", "Chromium";v="89", ";Not A Brand";v="99"',
'accept': 'text/html, */*; q=0.01',
'x-requested-with': 'XMLHttpRequest',
'sec-ch-ua-mobile': '?0',
'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36',
'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
'origin': 'https://www.societegenerale.lu',
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'cors',
'sec-fetch-dest': 'empty',
'referer': 'https://www.societegenerale.lu/fr/societe-generale-luxembourg/communiques-presse-actualites/',
'accept-language': 'en-US,en;q=0.9,bg;q=0.8',
'cookie': 'civicAllowCookies=yes; _ga=GA1.2.781032923.1616422955; _gid=GA1.2.1248201824.1616422955; _pk_ses.186.cb62=1; _pk_id.186.cb62=be2f692b2d249855.1616422955.1.1616422993.1616422955.; SERVERID=f0'
}
class SocietegeneraleluSpider(scrapy.Spider):
    """Crawls Société Générale Luxembourg press releases / news posts.

    Pagination pages are fetched with a *blocking* `requests` POST against the
    site's AJAX endpoint (module-level `url` / `base_payload` / `headers`);
    individual posts are then followed with normal Scrapy requests.
    """
    name = 'societegeneralelu'
    start_urls = ['https://www.societegenerale.lu/fr/societe-generale-luxembourg/communiques-presse-actualites/']
    page = 1  # current AJAX results page (class attribute, bumped in parse)

    def parse(self, response):
        # Ask the AJAX endpoint for the current page of result cards.
        payload = base_payload.format(self.page)
        data = requests.request("POST", url, headers=headers, data=payload)
        raw_data = scrapy.Selector(text=data.text)
        # Each card carries a data-ref id used to build the post URL.
        post_links = raw_data.xpath('//div[contains(@id, "card2")]/@data-ref').getall()
        for post in post_links:
            link = 'https://www.societegenerale.lu/fr/type/1234/ajaxsid/' + post
            yield response.follow(link, self.parse_post)
        # While the page returned cards, advance the counter and re-enter
        # parse; dont_filter lets the same URL be scheduled again.
        if post_links:
            self.page += 1
            yield response.follow(response.url, self.parse, dont_filter=True)

    def parse_post(self, response):
        # Extract title, body text (whitespace-normalized) and date, then
        # load them into the item (TakeFirst keeps the first value per field).
        title = response.xpath('//h1/text()').get()
        description = response.xpath('//div[@class="intro" or @class="sgnews_single_content"]//text()[normalize-space()]').getall()
        description = [p.strip() for p in description]
        description = ' '.join(description).strip()
        date = response.xpath('//div[@class="sgnews_single_date"]/text()').get()
        item = ItemLoader(item=SocietegeneraleluItem(), response=response)
        item.default_output_processor = TakeFirst()
        item.add_value('title', title)
        item.add_value('description', description)
        item.add_value('date', date)
        return item.load_item()
| [
"hr.grudev@gmail.com"
] | hr.grudev@gmail.com |
c120e49f4f9c35c388c6951b0c0ce3df24e343ce | 3d16bcf91c546dfc638bf9e48d7690e8aed37ee2 | /tests/Cpl/Io/File/_0test/ansi/linux/gcc/tca.py | 6128bb6427c8ac96434fb081af2c6a6d7466e4b4 | [] | no_license | johnttaylor/colony.core | 7c3aa43abdd564689e1540795b8044228b97271c | e00902d33c9224a34e9f68edb02c18eb9571b09f | refs/heads/master | 2023-07-24T08:34:04.956247 | 2023-06-20T00:02:55 | 2023-06-20T00:02:55 | 31,176,673 | 2 | 2 | null | 2023-06-17T21:56:08 | 2015-02-22T19:38:07 | C | UTF-8 | Python | false | false | 378 | py | #!/usr/bin/python3
"""Invokes NQBP's tca_base.py script"""
import os
import sys
# Make sure the environment is properly set before locating NQBP's tooling.
NQBP_BIN = os.environ.get('NQBP_BIN')
if NQBP_BIN is None:  # `is None` is the idiomatic (PEP 8) null test, was `== None`
    sys.exit( "ERROR: The environment variable NQBP_BIN is not set!" )
sys.path.append( NQBP_BIN )

# Find the Package & Workspace root
from other import tca_base
tca_base.run( sys.argv )
| [
"john.t.taylor@gmail.com"
] | john.t.taylor@gmail.com |
0d9b15ea3c7bec732cb0284bec313aeb007f2744 | 30fe7671b60825a909428a30e3793bdf16eaaf29 | /.metadata/.plugins/org.eclipse.core.resources/.history/7b/b0003d536ee000161581bdbc63c32924 | 8542d9d0d0a25cefdc1e28b41a9026d4a97d1dc3 | [] | no_license | abigdream84/PythonStudy | 0fc7a3b6b4a03a293b850d0ed12d5472483c4fb1 | 059274d3ba6f34b62ff111cda3fb263bd6ca8bcb | refs/heads/master | 2021-01-13T04:42:04.306730 | 2017-03-03T14:54:16 | 2017-03-03T14:54:16 | 79,123,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | #!/usr/bin/env python
#coding:UTF-8
from ebank.ui.ui import ui
from ebank.model.account import account
def main():
    """Console entry point: greet, prompt for credentials, look the user up.

    NOTE(review): the loop has no exit condition — it runs until ui.login()
    or account.chkUser raises/exits; confirm that is intended.
    """
    while True:
        ui_handle = ui()
        ui_handle.welcome()
        user = ui_handle.login()
        user_id = user[0]    # assumes login() returns (id, password) — TODO confirm
        print(type(user_id))
        user_pwd = user[1]   # read but never used below — TODO confirm
        print(account.chkUser(user_id))

if __name__ == '__main__':
    main()
| [
"abigdream@hotmail.com"
] | abigdream@hotmail.com | |
0c3795f1b60250200e5641ae38455ed69ba6b028 | ad13583673551857615498b9605d9dcab63bb2c3 | /output/instances/nistData/list/boolean/Schema+Instance/NISTXML-SV-IV-list-boolean-minLength-1-5.py | 08d968ec0e7c26b5c3c3a9b5afb4d3332bc7d2c7 | [
"MIT"
] | permissive | tefra/xsdata-w3c-tests | 397180205a735b06170aa188f1f39451d2089815 | 081d0908382a0e0b29c8ee9caca6f1c0e36dd6db | refs/heads/main | 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 | MIT | 2023-07-25T14:19:04 | 2020-02-10T21:59:47 | Python | UTF-8 | Python | false | false | 344 | py | from output.models.nist_data.list_pkg.boolean.schema_instance.nistschema_sv_iv_list_boolean_min_length_1_xsd.nistschema_sv_iv_list_boolean_min_length_1 import NistschemaSvIvListBooleanMinLength1
# Generated instance document: a list-of-boolean value (six items) that
# satisfies the schema's minLength=1 facet.
obj = NistschemaSvIvListBooleanMinLength1(
    value=[
        True,
        True,
        True,
        True,
        True,
        True,
    ]
)
| [
"tsoulloftas@gmail.com"
] | tsoulloftas@gmail.com |
d34e775ac8013ebec76c1305b47bb894ae4f6049 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_2645486_0/Python/plam/b.py | cdc42c6429eb2ac1a36ffb3671d185e9df7da650 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 762 | py | #!/usr/bin/python
import sys
# Python 2 script: file() == open(); input file named on the command line.
data = file( sys.argv[1] ).read().splitlines()
l = data.pop( 0 )
N = int(l)   # number of test cases
CACHE = {}   # NOTE(review): never used below — memoization was planned but not wired up
def MAX( i, CURE ):
max = 0
if i >= N:
return 0
if CURE <= 0:
return 0
if CURE > E:
CURE = E
for e in range(0, CURE+1):
if CURE-e+R < 0:
continue
m = V[i]*e + MAX(i+1,CURE-e+R)
if m > max:
# print 'max is',i,e,m
max = m
return max
# One iteration per test case (Python 2 print statements).
for CASE in range(1,N+1):
    print 'Case #%d:' % CASE,
    l = data.pop( 0 )
    # NOTE: this rebinds N (originally the case count) to the per-case value
    # count; safe only because the range() above was already evaluated.
    E, R, N = l.split(' ')
    E = int(E)
    R = int(R)
    N = int(N)
    # print E, R, N
    l = data.pop( 0 )
    VT = l.split(' ')
    V = []
    for v in VT:
        V.append( int(v))
    # print V
    ans = MAX(0,E)
    print ans
| [
"eewestman@gmail.com"
] | eewestman@gmail.com |
310da1a7f93ce7516e25d63143c848e4ff876c3d | 199c19abc71108b4507d1689537a632ccb1a76f9 | /session4/ss4_intro.py | 69e34330da24254fe7ef169654a3e559bcac2aef | [] | no_license | VetKira/daotrongcuong-fundamentals-c4e23 | 15d1932c95574462540088b3abba7fed6631c03f | ea1da093146ef5c69640bb57cb13aee7f568cfcd | refs/heads/master | 2020-04-02T17:18:49.342412 | 2018-11-01T13:29:34 | 2018-11-01T13:29:34 | 154,652,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 681 | py | # person = ["Kira","Hai Phong","WRU",22,4,257,False]
# person={} # with braces {} Python treats this as a dictionary
# print(person)
# print(type(person))
# person={
#     "name": "kira" #key:value
# }
# print(person)
# print(type(person))
# A dict maps keys to values (unlike a list, which is indexed by position).
person={
"name":"kira",
"place":"hai phong",
"age": 22,
}
# print(person)
# person["girlfriend"] = 257 #[key]=value
# print(person)
# print(person["name"]) # or set key = "name" and look up person[key]
# key = " name"
# if key in person:
#     print(person[key])
# else:
#     print("not found")
# update an existing key:
# person["age"] = 23
# print(person)
# delete a key:
del person["age"]
print(person)
#list thi theo index , con diction thi theo key | [
"doanxem.ml@gmail.com"
] | doanxem.ml@gmail.com |
ed838a8ef2fb1149ebf50badc8a41115bf5450e8 | 1f84e6428dea4a17a8e2df15fa6f1ec3404ec0a5 | /test/integrationTests.py | 8bf2cc868e0ce631d3ce7cacdd80667d5ac2fa4e | [] | no_license | hejibo/pythonTranslator | cf332ee8ff29300fada215fe5e794f8579516fb1 | c5586d6ea186311a930507f0d827069b9fef805f | refs/heads/master | 2020-12-24T10:24:22.002897 | 2013-03-23T16:59:57 | 2013-03-23T16:59:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,989 | py | #!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: terry.yinzhe@gmail.com
#
import unittest
from subprocess import Popen, PIPE
import os
import dub.resource as resource
from .testData import typeOFTranslatedLineInList, WELCOME
import sys
# Py2/py3 shim: subprocess pipes yield bytes on py3 and str on py2; the bool
# (major>2) indexes the pair to pick identity (py2) or UTF-8 decode (py3).
decode = [lambda x:x, lambda x:x.decode('UTF-8')][sys.version_info.major>2]
class testDubForPythonInInteractiveMode(unittest.TestCase):
    """End-to-end checks of the dub hook inside an interactive `python -i`."""
    def setUp(self):
        # Launch an interactive interpreter with dubShell.py as PYTHONSTARTUP
        # so the translation hook is installed before any input is read.
        env = os.environ.copy()
        env["PYTHONSTARTUP"] = "dubShell.py"
        self.shell = Popen("python -i".split(), stdin = PIPE, stdout = PIPE, stderr = PIPE, env = env)
    def testShouldSeeWelcomeInformation(self):
        # No input: only the startup banner should appear (on stderr).
        stdout, stderr = self.shell.communicate("")
        self.assertIn(WELCOME + resource.VERSION, decode(stderr))
        self.assertEqual('', decode(stdout))
    def testShouldSeeTranslatedSyntaxError(self):
        # Feed an incomplete expression; expect a translated SyntaxError line.
        stdout, stderr = self.shell.communicate("1+\n".encode("UTF-8"))
        self.assertTrue(typeOFTranslatedLineInList('SyntaxError', decode(stderr).splitlines()))
        self.assertEqual('', decode(stdout))
    def testShouldNotSeeTranslatedSyntaxErrorWhenNoInput(self):
        stdout, stderr = self.shell.communicate("")
        self.assertFalse(typeOFTranslatedLineInList('SyntaxError', decode(stderr).splitlines()))
        self.assertEqual('', decode(stdout))
class testDubForPythonInProgramMode(unittest.TestCase):
    """Checks of running a whole program (example.py) non-interactively."""
    def testShouldSeeNoErrorWhenEverythingIsOK(self):
        self.shell = Popen("python example.py".split(), stdin = PIPE, stdout = PIPE, stderr = PIPE)
        stdout, stderr = self.shell.communicate("")
        self.assertEqual([], decode(stderr).splitlines())
    def testShouldSeeTranslationOfTheError(self):
        # Passing a broken expression as argv should yield a translated error.
        self.shell = Popen("python example.py 1+\n".split(), stdin = PIPE, stdout = PIPE, stderr = PIPE)
        stdout, stderr = self.shell.communicate("")
        self.assertTrue(typeOFTranslatedLineInList('SyntaxError', decode(stderr).splitlines()))
class testDubForProgramUsingTraceback(unittest.TestCase):
    """Checks that importing dub augments traceback formatting in-process."""
    def testShouldGetDualLanguageTraceback(self):
        import dub
        import traceback
        import sys
        try:
            eval("1+\n")
        except:  # deliberately broad: we only care that *some* error occurred
            etype, value, tb = sys.exc_info()
        traceList = traceback.format_exception(etype, value, tb)
        self.assertTrue(typeOFTranslatedLineInList('SyntaxError', traceList))

if __name__ == '__main__':
    unittest.main()
"terry@odd-e.com"
] | terry@odd-e.com |
2087b9e600595db1a0b6a7ba440d1bbb94384897 | 886f8474f63e5cc98378f5194ae396e9860918f3 | /translatie/ssh.py | 69d0322c935ef94c12a40836115b7ebd4a353153 | [] | no_license | flybetter/Translate | 9b4b1d9ceaefe243997d31bff1dfee3944b4d394 | fb0adeda489ff5dd6785d0625e7b246828d25445 | refs/heads/master | 2021-07-12T17:30:53.776441 | 2017-10-15T00:57:13 | 2017-10-15T00:57:13 | 106,973,657 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 701 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
@project= Translate
@file= ssh
@author= wubingyu
@create_time= 2017/10/8 下午7:38
"""
from pexpect import pxssh
import re
def send_command(s, cmd):
    """Send *cmd* over the pxssh session *s*, wait for the prompt to return,
    and hand back everything the remote side printed before it."""
    s.sendline(cmd)
    s.prompt()
    output = s.before
    return output
def connect(host, user, password):
    """Open a pxssh session to *host* and return it; exit the process on failure."""
    try:
        s = pxssh.pxssh()
        s.login(host, user, password)
        return s
    except Exception as e:
        # Narrowed from a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; include the cause in the message.
        print("error: %s" % e)
        # Non-zero exit signals failure to the shell (was exit(0), i.e. "success").
        exit(1)
def main():
    # Hard-coded demo (Python 2): list java processes on the remote host and
    # pull the java binary path out of one line of the output.
    s = connect("10.245.250.38", "root", "cmsroot")
    result = send_command(s, "ps -ef|grep java")
    # NOTE(review): assumes the output has at least 5 '\r'-separated lines —
    # fragile; confirm against the remote's actual ps output.
    line = result.split('\r')[4]
    print line
    print re.findall(r"\S\w*/\w*/java", line)

if __name__ == '__main__':
    main()
| [
"flybetter@163.com"
] | flybetter@163.com |
aaa0047af98a795f9cec78bad8b8c1e5d46a56d5 | 1254745d2062d2d5475e209c269f9c9c68f04084 | /bin/sclrq.py | 8b2d15521871f3389ff34b6d0c266ffca82845db | [] | no_license | noobermin/lspplot | 99d2f9f748be91ed90690c76460edc44cf398da0 | 15822c5502f42b590403058e5a2244af3063478f | refs/heads/master | 2022-11-06T02:47:51.161575 | 2022-10-27T04:45:59 | 2022-10-27T04:45:59 | 56,020,778 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,388 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Just render something.
Usage:
./sclrq.py [options] (--show|-s) <i>
./sclrq.py [options] <i> <outname>
Options:
--help -h
--show -s Show
--nozip -U sclr/flds are NOT gzipped.
--zip -Z sclr/flds are gzipped. If neither of these two are set,
guess based on name.
--log10 -l Log it.
--lims=LIM Set lims [default: (1e18,1e23)]
--highlight=H Set highlight.
--quantity=Q -Q Q Render this quantity [default: RhoN10]
--dir=D -D D Read from this dir [default: .]
--restrict=R Restrict it.
--x-restrict=R Restrict by positions as a 4 tuple.
--t-offset=T Set time offset in fs. [default: 0].
--title=T Set the title [default: Electron density]
--units=U Set the colorbar units [default: number/cc]
--laser Plot contours of the laser poyting vector.
--intensity=I -I I Make a contour of this intensity [default: 3e18]
--equal -E Make spatial dimensions equal.
--cmap=CMAP Set the colormap. [default: viridis]
--nofloor Raise an error if there are no positive values for log.
--flip -F Flip instead rotate (ie., flip x axis) as in older
versions.
--no-ticks Don't include ticks.
--orientation=V "V" for vertical or "H" for horizontal [default: V]
'''
from docopt import docopt;
import numpy as np;
import numpy.linalg as lin;
from pys import parse_ftuple, parse_ituple;
from lspreader.flds import read_indexed, restrict;
from lspplot.sclr import S;
from lspplot.pc import pc,highlight,timelabel;
from lspplot.physics import c,mu0,e0;
# Parse CLI options; docopt uses the module docstring above as the spec.
opts = docopt(__doc__,help=True);
quantity = opts['--quantity'];
# Only read the E/B field files when laser contours were requested.
fvar=['E','B'] if opts['--laser'] else None;
titlestr=opts['--title']
units=opts['--units'];
svar=[quantity];
# gzip handling: explicit flags win; otherwise guess from the file name.
if opts['--nozip']:
    gzip = False;
elif opts['--zip']:
    gzip = True;
else:
    gzip = 'guess';
#####################################
#reading data
d = read_indexed(int(opts['<i>']),
    flds=fvar,sclr=svar,
    gzip=gzip,dir=opts['--dir'],
    gettime=True,vector_norms=False);
#choosing positions
# Degenerate y extent means a 2D x-z simulation: plot against z instead.
ylabel = 'z' if np.isclose(d['y'].max(),d['y'].min()) else 'y';
if opts['--x-restrict']:
    # Restriction given in microns: map each bound to the nearest grid index
    # (the *1e4 factor is presumably cm -> microns — confirm units).
    res = parse_ftuple(opts['--x-restrict'], length=4);
    res[:2] = [ np.abs(d['x'][:,0]*1e4 - ires).argmin() for ires in res[:2] ];
    res[2:] = [ np.abs(d[ylabel][0,:]*1e4 - ires).argmin() for ires in res[2:] ];
    #including the edges
    res[1]+=1;
    res[3]+=1;
    restrict(d,res);
elif opts['--restrict']:
    # Restriction given directly as grid indices.
    res = parse_ituple(opts['--restrict'],length=None);
    restrict(d,res);
x,y = d['x']*1e4, d[ylabel]*1e4;
#massaging data
t = d['t'];
q = d[quantity];
#####################################
#plotting
#getting options from user
mn,mx = parse_ftuple(opts['--lims'],length=2);
if opts['--flip']:
    rot,flip = False, True;
else:
    rot,flip = True, False;
#plot the density
#orientation of colorbar
if opts['--orientation'] == "V":
    orient = "vertical"
elif opts['--orientation'] == "H":
    orient = "horizontal"
else:
    print('orientation must be either "V" or "H"');
    print(__doc__);
    quit();
r=pc(
    q,(x,y), lims=(mn,mx),log=opts['--log10'],
    clabel=units, title=titlestr,
    agg=not opts['--show'],
    flip=flip,
    rotate=rot,
    orient=orient,
    nofloor=opts['--nofloor'],
    cmap=opts['--cmap'],
);
# Optional highlight contour of the plotted quantity itself.
if opts['--highlight'] and opts['--highlight'] != "None" and opts['--highlight'] != 'none':
    myhi = float(opts['--highlight']);
    highlight(
        r, myhi,
        color="lightyellow", alpha=0.5);
# Optional laser intensity contour from the Poynting vector.
if opts['--laser']:
    laser = S(d);
    print(laser.shape);
    I = float(opts['--intensity']);
    highlight(r, I, q=laser,
              color="red", alpha=0.15);
import matplotlib.pyplot as plt;
toff=float(opts['--t-offset']);
timelabel(
    r,
    'time={:.2f} fs'.format(t*1e6+toff),
    size=11,
    color='white');
if opts['--equal']:
    plt.axis('equal');
    r['axes'].autoscale(tight=True);
if opts['--no-ticks']:
    plt.tick_params(
        axis='both',
        which='both',
        bottom='off',
        top='off',
        right='off',
        left='off');
# Either show interactively or save to the requested output file.
if opts['--show']:
    plt.show();
else:
    plt.savefig(opts['<outname>']);
| [
"ngirmang.1@osu.edu"
] | ngirmang.1@osu.edu |
eeda3980822e8716c6c500124cb43778dd7b264c | 570233f90e10dcfae793c01af1b2318c2b237775 | /pg58.py | 21d378f224565101eb5ba14bddb335be2e0664ab | [] | no_license | aarthisandhiya/codekata_player-py- | 90218d4f010caa688547aa90a6faf7d74121e6a3 | 38220ee5e2ad7764ab49514b1351339da534618a | refs/heads/master | 2020-05-29T15:16:42.306444 | 2019-05-08T06:42:50 | 2019-05-08T06:42:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | sen=str(input())
# `sen` (read on the previous line) holds the sentence; split it into tokens,
# read the target word, and print how many tokens equal it exactly.
sen=sen.split()
string=str(input())
# list.count performs the same exact-equality tally as the original index loop
print(sen.count(string))
| [
"noreply@github.com"
] | aarthisandhiya.noreply@github.com |
878ce48ea1742673841e76d7844c45d81e62b156 | bd08d0532f20b7285b437c9bf620de1bbcd5b9ea | /aalh_iit_tiedtkes_001/populate-decades-column.py | 02035ea25507a06e561210d718069c09883ba5c2 | [
"Unlicense"
] | permissive | johndewees/iitmigration | a9e8a31ba6ceb541ce12c22fd612596cc243dbca | 4dadfbecda719d6e7d60af076a231aedec3c862f | refs/heads/main | 2023-03-14T17:06:58.777683 | 2021-03-27T20:44:58 | 2021-03-27T20:44:58 | 320,086,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,317 | py | from openpyxl import load_workbook
filename = 'aalh_iit_tiedtkes_001.xlsx'
wb = load_workbook(filename)
ws = wb['Metadata Template']
minimumcol = 15
maximumcol = 15
minimumrow = 7
maximumrow = 154
iterationrow = 7
targetcol = 15
decadescol = 14
for row in ws.iter_rows(min_row=minimumrow, min_col=minimumcol, max_row=maximumrow, max_col=maximumcol):
for cell in row:
testvar = str(ws.cell(row=iterationrow, column=targetcol).value)
if testvar == None:
ws.cell(row=iterationrow, column=decadescol).value = None
elif testvar.find('180') != -1:
ws.cell(row=iterationrow, column=decadescol).value = '1800s'
elif testvar.find('181') != -1:
ws.cell(row=iterationrow, column=decadescol).value = '1810s'
elif testvar.find('182') != -1:
ws.cell(row=iterationrow, column=decadescol).value = '1820s'
elif testvar.find('183') != -1:
ws.cell(row=iterationrow, column=decadescol).value = '1830s'
elif testvar.find('184') != -1:
ws.cell(row=iterationrow, column=decadescol).value = '1840s'
elif testvar.find('185') != -1:
ws.cell(row=iterationrow, column=decadescol).value = '1850s'
elif testvar.find('186') != -1:
ws.cell(row=iterationrow, column=decadescol).value = '1860s'
elif testvar.find('187') != -1:
ws.cell(row=iterationrow, column=decadescol).value = '1870s'
elif testvar.find('188') != -1:
ws.cell(row=iterationrow, column=decadescol).value = '1880s'
elif testvar.find('189') != -1:
ws.cell(row=iterationrow, column=decadescol).value = '1890s'
elif testvar.find('190') != -1:
ws.cell(row=iterationrow, column=decadescol).value = '1900s'
elif testvar.find('191') != -1:
ws.cell(row=iterationrow, column=decadescol).value = '1910s'
elif testvar.find('192') != -1:
ws.cell(row=iterationrow, column=decadescol).value = '1920s'
elif testvar.find('193') != -1:
ws.cell(row=iterationrow, column=decadescol).value = '1930s'
elif testvar.find('194') != -1:
ws.cell(row=iterationrow, column=decadescol).value = '1940s'
elif testvar.find('195') != -1:
ws.cell(row=iterationrow, column=decadescol).value = '1950s'
elif testvar.find('196') != -1:
ws.cell(row=iterationrow, column=decadescol).value = '1960s'
elif testvar.find('197') != -1:
ws.cell(row=iterationrow, column=decadescol).value = '1970s'
elif testvar.find('198') != -1:
ws.cell(row=iterationrow, column=decadescol).value = '1980s'
elif testvar.find('199') != -1:
ws.cell(row=iterationrow, column=decadescol).value = '1990s'
elif testvar.find('200') != -1:
ws.cell(row=iterationrow, column=decadescol).value = '2000s'
elif testvar.find('201') != -1:
ws.cell(row=iterationrow, column=decadescol).value = '2010s'
elif testvar.find('202') != -1:
ws.cell(row=iterationrow, column=decadescol).value = '2020s'
print(iterationrow,'|',testvar,'|',ws.cell(row=iterationrow, column=decadescol).value)
iterationrow = iterationrow + 1
wb.save('aalh_iit_tiedtkes_001.xlsx') | [
"noreply@github.com"
] | johndewees.noreply@github.com |
eb5559ff8572a28b8cf7bc89c5975776da33d495 | 544cfadc742536618168fc80a5bd81a35a5f2c99 | /tools/asuite/atest/test_runners/regression_test_runner.py | b71634fb8be8fc3a0f62d9a949cd439fe1cf8465 | [] | no_license | ZYHGOD-1/Aosp11 | 0400619993b559bf4380db2da0addfa9cccd698d | 78a61ca023cbf1a0cecfef8b97df2b274ac3a988 | refs/heads/main | 2023-04-21T20:13:54.629813 | 2021-05-22T05:28:21 | 2021-05-22T05:28:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,415 | py | # Copyright 2018, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Regression Detection test runner class.
"""
import constants
from test_runners import test_runner_base
class RegressionTestRunner(test_runner_base.TestRunnerBase):
    """Regression Test Runner class.

    Wraps TradeFed's `regression` command to compare pre/post patch metrics.
    """
    NAME = 'RegressionTestRunner'
    EXECUTABLE = 'tradefed.sh'
    _RUN_CMD = '{exe} run commandAndExit regression -n {args}'
    _BUILD_REQ = {'tradefed-core', constants.ATEST_TF_MODULE}

    def __init__(self, results_dir):
        """Init stuff for base class."""
        super(RegressionTestRunner, self).__init__(results_dir)
        # Template values for _RUN_CMD; 'args' is filled per-run in
        # generate_run_commands().
        self.run_cmd_dict = {'exe': self.EXECUTABLE,
                             'args': ''}

    # pylint: disable=unused-argument
    def run_tests(self, test_infos, extra_args, reporter):
        """Run the list of test_infos.

        Args:
            test_infos: List of TestInfo.
            extra_args: Dict of args to add to regression detection test run.
            reporter: A ResultReporter instance.

        Returns:
            Return code of the process for running tests.
        """
        run_cmds = self.generate_run_commands(test_infos, extra_args)
        # Only one command is ever generated; run it and block until done.
        proc = super(RegressionTestRunner, self).run(run_cmds[0],
                                                     output_to_stdout=True)
        proc.wait()
        return proc.returncode

    # pylint: disable=unnecessary-pass
    # Please keep above disable flag to ensure host_env_check is overriden.
    def host_env_check(self):
        """Check that host env has everything we need.

        We actually can assume the host env is fine because we have the same
        requirements that atest has. Update this to check for android env vars
        if that changes.
        """
        pass

    def get_test_runner_build_reqs(self):
        """Return the build requirements.

        Returns:
            Set of build targets.
        """
        return self._BUILD_REQ

    # pylint: disable=unused-argument
    def generate_run_commands(self, test_infos, extra_args, port=None):
        """Generate a list of run commands from TestInfos.

        Args:
            test_infos: A set of TestInfo instances.
            extra_args: A Dict of extra args to append.
            port: Optional. An int of the port number to send events to.
                  Subprocess reporter in TF won't try to connect if it's None.

        Returns:
            A list that contains the string of atest tradefed run command.
            Only one command is returned.
        """
        # NOTE: pop() mutates the caller's extra_args dict — both folder
        # entries are consumed here.
        pre = extra_args.pop(constants.PRE_PATCH_FOLDER)
        post = extra_args.pop(constants.POST_PATCH_FOLDER)
        args = ['--pre-patch-metrics', pre, '--post-patch-metrics', post]
        self.run_cmd_dict['args'] = ' '.join(args)
        run_cmd = self._RUN_CMD.format(**self.run_cmd_dict)
        return [run_cmd]
| [
"rick_tan@qq.com"
] | rick_tan@qq.com |
4d4ea052f312f76a8e371c23b614d640aaf2acc4 | fc3bdcbe68de7fce51b7b63158597ee70e03b3cc | /database.py | b0481854957bf4d62009c63176545192dca1bb8b | [] | no_license | claraj/peewee_tree_unittest | 70381ebfe6baee92c85539078855188da8b769d3 | e312f45fa73376c05288d604ee3b273d095a4f27 | refs/heads/main | 2021-09-26T16:39:14.219177 | 2021-09-22T23:03:40 | 2021-09-22T23:03:40 | 244,816,502 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | from peewee import IntegrityError
from models import Tree
def add_tree(name, max_height):
    """Insert a Tree row; wrap DB constraint violations in TreeError."""
    try:
        Tree(name=name, max_height=max_height).save()
    except IntegrityError as e:
        raise TreeError('Error adding tree because ' + str(e))
def get_all_trees():
    """Return every Tree row as a list."""
    result = Tree.select().execute()
    return list(result)
def delete_tree_by_name(name):
    """Delete all trees with this name; True if at least one row was removed."""
    trees_deleted = Tree.delete().where(Tree.name==name).execute()
    return trees_deleted > 0
class TreeError(Exception):
    """Raised when a Tree cannot be stored (e.g. a DB integrity error)."""
    pass
"10088152+claraj@users.noreply.github.com"
] | 10088152+claraj@users.noreply.github.com |
42b387187d177114e02e33bc0d8ccc6dbde81012 | 5c83c974b36505af76e97ecd7115e1437044c9f3 | /src/collections/nijl/createCollectionFromLocalJson.py | d51a43e579976e407deac725beba17da0c9bc93b | [
"CC0-1.0"
] | permissive | otani0083/iiif | ac6cf617ea79e5088ee8d49cbe69b3164e9706b6 | 358d91aef10d366ebb54a64990bde51debeee1f1 | refs/heads/master | 2021-05-21T07:33:05.411522 | 2020-04-03T01:08:12 | 2020-04-03T01:08:12 | 252,602,479 | 0 | 0 | CC0-1.0 | 2020-04-03T01:18:58 | 2020-04-03T01:18:58 | null | UTF-8 | Python | false | false | 2,406 | py | import json
import argparse
import sys
import glob
import time
# Build a IIIF sc:Collection from the locally cached manifest JSON files
# and write it to the docs tree.
collection_name = "nijl"
input_dir = "../../../json/collections/" + collection_name
output_path = "../../../docs/data/collection/collections/" + collection_name + ".json"

files = glob.glob(input_dir + "/*.json")

manifests = []
license_check = {}  # license URL -> number of manifests seen with it

for i, path in enumerate(files):  # `path` instead of `file` (shadows builtin)
    # Progress indicator every 100 files.
    if i % 100 == 0:
        print(str(i + 1) + "/" + str(len(files)))
    with open(path, 'r') as f:
        data = json.load(f)
    # Only IIIF Presentation 2 manifests are collected.
    if "@type" in data and data["@type"] == "sc:Manifest":
        manifest = data["@id"]
        print(manifest)
        label = data.get("label", "")
        manifest_obj = dict()
        manifest_obj["@id"] = manifest
        manifest_obj["@type"] = "sc:Manifest"
        manifest_obj["label"] = label
        canvases = data["sequences"][0]["canvases"]
        # Skip manifests with no canvases at all.
        if len(canvases) == 0:
            continue
        canvas = canvases[0]
        resource = canvas["images"][0]["resource"]
        # Prefer a IIIF Image API thumbnail; fall back to the canvas thumbnail.
        if "service" in resource:
            thumbnail = resource["service"]["@id"] + \
                "/full/200,/0/default.jpg"
        else:
            thumbnail = canvas["thumbnail"]["@id"]
        if thumbnail != "":
            manifest_obj["thumbnail"] = thumbnail
        license_url = data["license"]
        manifest_obj["license"] = license_url
        # Exclude manifests under the NIJL usage-restricted license.
        include = license_url != "http://kotenseki.nijl.ac.jp/page/usage.html"
        if license_url not in license_check:
            license_check[license_url] = 0
        license_check[license_url] += 1
        if include:
            manifests.append(manifest_obj)

print(license_check)

collection = dict()
collection["@context"] = "http://iiif.io/api/presentation/2/context.json"
collection["@id"] = "https://nakamura196.github.io/iiif/data/collection/collections/" + collection_name + ".json"
collection["@type"] = "sc:Collection"
collection["vhint"] = "use-thumb"
collection["manifests"] = manifests

# BUGFIX: the output handle was previously opened and never closed;
# use a context manager so the file is flushed and closed reliably.
with open(output_path, 'w') as fw:
    json.dump(collection, fw, ensure_ascii=False, indent=4, sort_keys=True,
              separators=(',', ': '))
"na.kamura.1263@gmail.com"
] | na.kamura.1263@gmail.com |
30f0f0d0a7e276bc44314c6d5123bdbc0f1685d0 | 1d91df48bfb43f15c1e567f8c8f2de6eef2b1dea | /test.py | 27d1ad5489789d8c33458bc2bd93d712c8a194df | [
"MIT"
] | permissive | dtekluva/Simple-Currency-recognition | 513cabcd6b30d43bb516d747992d81596d604e03 | 2bdb56d752b8b37aca62732cfbbffeb11beaed94 | refs/heads/master | 2021-05-12T11:29:22.023463 | 2018-01-15T23:31:03 | 2018-01-15T23:31:03 | 117,387,748 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,148 | py | """**Test memory data against input data"""
import mem_io as write
from functools import reduce
def resolve(imageArray):
    """Scale every pixel of the image, then reduce each scaled pixel."""
    import vectorize as vectorize
    scaled_pixels = [vectorize.scale(px) for px in imageArray]
    return [vectorize.further_reduce(px) for px in scaled_pixels]
def deviation(test_img):
    """Per-template pixel deviations between `test_img` and stored memory.

    Loads the memory dict, resolves the input image, and replaces each
    template's pixel list with a list of per-pixel differences
    (input - template).  Returns the (mutated) memory dict.
    """
    mem = write.io()
    processed = resolve(test_img)
    for key in mem:
        template = mem[key]
        mem[key] = [processed[idx][0] - template[idx][0]
                    for idx in range(len(processed))]
    return mem
def test(newarr, mem):
    """Classify a note image against the stored templates and print the result.

    Picks the template with the smallest normalised total deviation and
    prints the recognised denomination plus a confidence percentage.

    Args:
        newarr: raw input image array to classify.
        mem: unused; kept only for backward compatibility of the signature.
    """
    newarr = deviation(newarr)
    result = newarr  # same dict object; keys are the template names
    for key in newarr:
        # Sum of absolute per-pixel deviations, normalised by the total
        # pixel count (2284800 — presumably the template image size;
        # TODO confirm).
        total_dev = 0
        for value in newarr[key]:
            total_dev += abs(value)
        newarr[key] = (1 - ((2284800 - total_dev) / 2284800))
    decision = min(result.items(), key=lambda x: x[1])
    # BUGFIX: check the longest denomination strings first.  The original
    # tested "20" and "100" before "200"/"1000", so a plain substring match
    # misclassified the larger notes ("20" in "200", "100" in "1000").
    if "1000" in decision[0]:
        print("1000 Naira note, " + " decision confidence " + confidence(decision[1]) + "%")
    elif "500" in decision[0]:
        print("500 Naira note, " + " decision confidence " + confidence(decision[1]) + "%")
    elif "200" in decision[0]:
        print("200 Naira note, " + " decision confidence " + confidence(decision[1]) + "%")
    elif "100" in decision[0]:
        print("100 Naira note, " + " decision confidence " + confidence(decision[1]) + "%")
    elif "50" in decision[0]:
        print("50 Naira note, " + " decision confidence " + confidence(decision[1]) + "%")
    elif "20" in decision[0]:
        print("20 Naira note, " + " decision confidence " + confidence(decision[1]) + "%")
def confidence(val):
    """Format (1 - val) as a percentage string.

    Values above 1 are truncated to an integer first, so very large
    deviations yield whole-number (negative) percentages.
    """
    if val > 1:
        val = int(val)
    return str((1 - val) * 100)
"31896598+dtekluva@users.noreply.github.com"
] | 31896598+dtekluva@users.noreply.github.com |
6dbf49b1e7581c9482c56f08401812a37b784272 | f1614f3531701a29a33d90c31ab9dd6211c60c6b | /menu_sun_api/domain/model/order/order_repository.py | 7c755dc5edbe830e08bff96acd76103012b915aa | [] | no_license | pfpacheco/menu-sun-api | 8a1e11543b65db91d606b2f3098847e3cc5f2092 | 9bf2885f219b8f75d39e26fd61bebcaddcd2528b | refs/heads/master | 2022-12-29T13:59:11.644409 | 2020-10-16T03:41:54 | 2020-10-16T03:41:54 | 304,511,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,193 | py | from sqlalchemy import and_
from menu_sun_api.domain.db_repository import DBRepository
from menu_sun_api.domain.model.order.order import Order, OrderStatus, OrderStatusType
import logging
from menu_sun_api.shared.specification import Specification
logger = logging.getLogger()
class OrderRepository(DBRepository):
    """SQLAlchemy repository for Order aggregates.

    Wraps all queries for orders, their status history, and the
    queue/WMS integration bookkeeping.
    """

    def __init__(self, session=None):
        super().__init__(Order, session)

    def get_order(self, seller_id, order_id):
        """Return the order identified by (seller_id, order_id), or None.

        Raises sqlalchemy's MultipleResultsFound if the pair is not unique.
        """
        return self.session.query(Order). \
            filter(and_(Order.seller_id == seller_id,
                        Order.order_id == order_id)). \
            one_or_none()

    def get_order_not_seller_id(self, order_id):
        """Return the order with this order_id regardless of seller, or None."""
        return self.session.query(Order). \
            filter(Order.order_id == order_id). \
            one_or_none()

    def load_order_status(self, seller_id, order_id):
        """Return the full status history of an order, oldest first."""
        query = self.session.query(OrderStatus). \
            outerjoin(Order). \
            filter(and_(Order.seller_id == seller_id,
                        Order.order_id == order_id)). \
            order_by(OrderStatus.id)
        return query.all()

    def append_status(self, status):
        """Stage a new OrderStatus on the current session (no commit here)."""
        self.session.add(status)

    def load_pending_orders(self, seller_id):
        """Return the seller's APPROVED orders that were never queued."""
        orders = self.session.query(Order). \
            filter(and_(Order.order_queue_date.is_(None),
                        Order.seller_id == seller_id)).all()
        pending = []
        for order in orders:
            logger.info('Filtering order: [{}]'.format(order.order_id))
            status = order.status
            if status:
                logger.info('Order status: [{}]'.format(status.status))
                if status.status == OrderStatusType.APPROVED:
                    pending.append(order)
        return pending

    def mark_as_integrated(self, seller_id, order_id):
        """Stamp the order's integration_date with the current UTC time."""
        from datetime import datetime
        order = self.get_order(seller_id=seller_id, order_id=order_id)
        order.integration_date = datetime.utcnow()

    def load_orders_on_wms(self, seller_id):
        """Return the seller's integrated, recent orders that are on the WMS."""
        # BUGFIX: the original used the Python expression
        # `Order.integration_date is not None`, which evaluates to the
        # constant True and was silently ignored by filter(); SQL NULL
        # checks must use `.isnot(None)`.
        orders = self.session.query(Order). \
            filter(and_(Order.integration_date.isnot(None),
                        Order.seller_id == seller_id)). \
            filter(Order.created_date >= '2020-02-01'). \
            all()
        on_wms = []
        for order in orders:
            if order.on_wms():
                logger.info('Filtering order: [{}]'.format(order.order_id))
                on_wms.append(order)
        return on_wms

    def load_order_by_order_id(self, seller_id, order_id):
        """Return (as a list of at most one) the order if it is on the WMS."""
        # BUGFIX: same `is not None` -> `.isnot(None)` fix as above.
        orders = self.session.query(Order). \
            filter(and_(Order.integration_date.isnot(None),
                        Order.seller_id == seller_id)). \
            filter(Order.order_id == order_id). \
            limit(1)
        on_wms = []
        for order in orders:
            if order.on_wms():
                logger.info('Filtering order: [{}]'.format(order.order_id))
                on_wms.append(order)
        return on_wms

    def list_orders_with_unpublished_status(self, seller_id):
        """Return orders that have at least one not-yet-published status."""
        # BUGFIX: `OrderStatus.published_date is None` evaluated to the
        # Python constant False, making the subquery match nothing; the
        # SQL NULL test must be expressed with `.is_(None)`.
        stmt = self.session.query(OrderStatus.order_id). \
            join(Order). \
            filter(and_(OrderStatus.published_date.is_(None),
                        Order.seller_id == seller_id)).distinct()
        return self.session.query(Order). \
            filter(Order.id.in_(stmt)).all()

    def filter_by_specification(self, seller_id: int, specification: Specification):
        """Return all orders satisfying the given Specification for a seller."""
        query = self.session.query(Order)
        return query.filter(specification.is_satisfied_by(seller_id)).all()

    def list_orders_by_status(self, seller_id, status_filter):
        """Return the seller's orders whose current status equals status_filter."""
        orders = self.session.query(Order). \
            filter(Order.seller_id == seller_id).all()
        logger.info('Filtering order by status: [{}]'.format(status_filter))
        logger.info('Filtering all orders: {}'.format(list(map(lambda x: str(x), orders))))
        result = [order for order in orders if order.status.status == status_filter]
        logger.info('Filtered orders: {}'.format(list(map(lambda x: str(x), result))))
        return result
| [
"pfpacheco@gmail.com"
] | pfpacheco@gmail.com |
31ce100735578008d5eacc8a2d850897b2864889 | 810305a5f4d9592e81381c252ab24be43d33817e | /aishack/migrations/0002_auto__add_field_tutorial_author.py | 2d4216657dffd04b8ced32271fc7a5d0f0891b78 | [] | no_license | awal123/aishack | 9dbfcbb329b35674c6f0a15c6dfc9de39ba34d05 | 5a16efca42899f3ec1495a509fe801348f3933ac | refs/heads/master | 2021-01-22T21:23:11.168117 | 2014-08-31T05:02:36 | 2014-08-31T05:02:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,076 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add the ``author`` FK (to auth.User) on
    ``Tutorial``."""

    def forwards(self, orm):
        """Apply the migration: add the ``Tutorial.author`` column.

        ``default=0`` back-fills existing rows so the column can be added
        without interactive prompting; ``keep_default=False`` drops the
        default again afterwards.
        """
        # Adding field 'Tutorial.author'
        db.add_column(u'aishack_tutorial', 'author',
                      self.gf('django.db.models.fields.related.ForeignKey')(default=0, to=orm['auth.User']),
                      keep_default=False)

    def backwards(self, orm):
        """Revert the migration: drop the ``Tutorial.author`` column."""
        # Deleting field 'Tutorial.author'
        db.delete_column(u'aishack_tutorial', 'author_id')

    # Frozen ORM snapshot used by South in place of the live models.
    models = {
        u'aishack.category': {
            'Meta': {'object_name': 'Category'},
            'desc': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '256'})
        },
        u'aishack.quiz': {
            'Meta': {'object_name': 'Quiz'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        },
        u'aishack.track': {
            'Meta': {'object_name': 'Track'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '256'})
        },
        u'aishack.tracktutorials': {
            'Meta': {'object_name': 'TrackTutorials'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'track': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['aishack.Track']"}),
            'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['aishack.Tutorial']"})
        },
        u'aishack.tutorial': {
            'Meta': {'object_name': 'Tutorial'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
            'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['aishack.Category']"}),
            'content': ('django.db.models.fields.TextField', [], {}),
            'date': ('django.db.models.fields.DateField', [], {}),
            'excerpt': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'post_image': ('django.db.models.fields.URLField', [], {'max_length': '256'}),
            'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
            'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
        },
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['aishack']
"sinha.utkarsh1990@gmail.com"
] | sinha.utkarsh1990@gmail.com |
693933d07fc47fcbfe96c3cef05720885ce398ed | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/28/usersdata/109/8524/submittedfiles/serie1.py | a5b0d2c43f8323230a99f28b4d2d20ff6ee4445b | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | # -*- coding: utf-8 -*-
from __future__ import division
import math
n = input('Digite o valor de n:')
x = 1
# BUGFIX: the accumulator was initialised as `Cont` (capital C) while the
# loop used `cont`, which raised NameError on the first iteration.
cont = 0
for i in range(0, n + 1, 1):
    if x % 2 == 0:
        x = (x * (-1))
    a = x // (x ** 2)
    cont = cont + a
    # NOTE(review): `x` starts at 1 and is only negated when already even,
    # so it never changes and every term `x // x**2` is 1.  The parity test
    # was probably meant to use `i` (alternating-sign series) — confirm the
    # intended formula before relying on the result.
print('%.5f' % cont)
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
e5e8a66a4d5db35873d8ab76575a05bd8dd038cd | 2687b15ecde22e25a3ec89abb4e380af283cd31d | /CMGTools/H2TauTau/python/proto/samples/run2011/tauMu_ColinOct23_Down.py | f2802c9d00cbbc85be154efa0acdfa86c1d528c0 | [] | no_license | francescobrivio/cmg-cmssw | bbd60e0dafece3caf83ab969b67b636541dac3d9 | 2b3d8444249ac00c769d2974d604a5e6a698d6ff | refs/heads/CMGTools-from-CMSSW_7_2_3 | 2020-12-31T06:22:11.695970 | 2015-04-08T07:40:23 | 2015-04-08T07:40:23 | 33,541,974 | 0 | 0 | null | 2015-04-07T12:32:40 | 2015-04-07T12:32:39 | null | UTF-8 | Python | false | false | 2,958 | py | import itertools
import copy
from CMGTools.RootTools.fwlite.Config import printComps
from CMGTools.RootTools.utils.connect import connect
from CMGTools.RootTools.utils.splitFactor import splitFactor
from CMGTools.H2TauTau.proto.samples.run2011.data import *
from CMGTools.H2TauTau.proto.samples.run2011.embed import *
from CMGTools.H2TauTau.proto.samples.run2011.ewk import *
from CMGTools.H2TauTau.proto.samples.run2011.diboson import *
from CMGTools.H2TauTau.proto.samples.run2011.higgs import *
from CMGTools.H2TauTau.proto.samples.run2011.triggers_tauMu import data_triggers_2011A, data_triggers_2011B, mc_triggers
# Map dataset-path regexes to the short sample aliases used by connect().
# Lines prefixed with `##` are samples currently disabled for this config.
aliases = {
    '/VBF_HToTauTau.*START42.*':'HiggsVBF',
    '/GluGluToHToTauTau.*START42.*':'HiggsGGH',
    '/WH_ZH_TTH_HToTauTau.*START42.*':'HiggsVH',
    '/DYJets.*START42.*':'DYJets',
    ## '/WJetsToLNu.*START42.*':'WJets',
    ## '/W1Jet.*START42.*':'W1Jets',
    ## '/W2Jets.*START42.*':'W2Jets',
    ## '/W3Jets.*START42.*':'W3Jets',
    ## '/W4Jets.*START42.*':'W4Jets',
    ## '/TTJets.*START42.*':'TTJets',
    ## '/T_TuneZ2_tW-channel.*START42.*':'T_tW',
    ## '/Tbar_TuneZ2_tW-channel.*START42.*':'Tbar_tW',
    ## '/TauPlusX/Run2011A-03Oct2011-v1.*':'data_Run2011A_03Oct2011_v1',
    ## '/TauPlusX/Run2011A-05Aug2011-v1.*':'data_Run2011A_05Aug2011_v1',
    ## '/TauPlusX/Run2011A-May10ReReco-v1.*':'data_Run2011A_May10ReReco_v1',
    ## '/TauPlusX/Run2011A-PromptReco-v4.*':'data_Run2011A_PromptReco_v4',
    ## '/TauPlusX/Run2011B-PromptReco-v1':'data_Run2011B_PromptReco_v1',
    '/DoubleMu/StoreResults-DoubleMu_2011A_03Oct2011_v1.*':'embed_Run2011A_03Oct2011_v1',
    '/DoubleMu/StoreResults-DoubleMu_2011A_05Aug2011_v1.*':'embed_Run2011A_05Aug2011_v1',
    '/DoubleMu/StoreResults-DoubleMu_2011A_10May2011_v1.*':'embed_Run2011A_May10ReReco_v1',
    '/DoubleMu/StoreResults-DoubleMu_2011A_PR_v4.*':'embed_Run2011A_PromptReco_v4',
    '/DoubleMu/StoreResults-DoubleMu_2011B_PR_v1.*':'embed_Run2011B_PromptReco_v1',
    ## '/WW_TuneZ2.*START42.*':'WW',
    ## '/WZ_TuneZ2.*START42.*':'WZ',
    ## '/ZZ_TuneZ2.*START42.*':'ZZ',
    ## '/WWJetsTo2L2Nu.*START42.*':'WWJetsTo2L2Nu',
    ## '/WZJetsTo2L2Q.*START42.*':'WZJetsTo2L2Q',
    ## '/WZJetsTo3LNu.*START42.*':'WZJetsTo3LNu',
    ## '/ZZJetsTo2L2Nu.*START42.*':'ZZJetsTo2L2Nu',
    ## '/ZZJetsTo2L2Q.*START42.*':'ZZJetsTo2L2Q',
    ## '/ZZJetsTo4L.*START42.*':'ZZJetsTo4L',
    }

# Assemble the full MC sample list (EWK + Higgs + diboson).
MC_list = copy.copy( mc_ewk )
MC_list.extend( mc_higgs )
MC_list.extend( mc_diboson )

# Attach the appropriate trigger lists to MC and to the 2011A/B data eras.
for sam in MC_list:
    sam.triggers = mc_triggers

for data in data_list_2011A:
    data.triggers = data_triggers_2011A

for data in data_list_2011B:
    data.triggers = data_triggers_2011B

allsamples = copy.copy(MC_list)
allsamples.extend( data_list_2011 )
allsamples.extend( embed_list_2011 )

# Resolve dataset locations for all samples (results are cached).
connect( allsamples, '%TAUMU_Down_ColinOct30', 'tauMu.*root', aliases, cache=True, verbose=False)

# Manually corrected generated-event count for the tW single-top sample.
Tbar_tW.nGenEvents = 809984.

for c in allsamples:
    c.splitFactor = splitFactor(c)
| [
"jan.steggemann@cern.ch"
] | jan.steggemann@cern.ch |
e0a1e53b3edd3865eab6b41b6d5486a7c4f7520b | 372e299709a70e30a8487dbaa669a4f572d99df0 | /2015/src/day_06/part_1.py | 20caf305073a30a2c72c51e8e72822fe24fd49bc | [] | no_license | mkierc/advent-of-code | e806e30cbed5bb8224b28583b8943c58ba0f80e0 | f5115403fcff0082e1cd33308a0259b234c58229 | refs/heads/master | 2021-12-25T05:43:24.673107 | 2021-12-17T07:56:20 | 2021-12-17T07:56:20 | 76,584,172 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,037 | py | import re
import matplotlib.pyplot as plt
import numpy
with open('data.txt') as file:
input_data = file.read().splitlines()
def turn_on(screen, x_1, y_1, x_2, y_2):
for row in range(x_1, x_2):
for column in range(y_1, y_2):
screen[column][row] = 1
def turn_off(screen, x_1, y_1, x_2, y_2):
for row in range(x_1, x_2):
for column in range(y_1, y_2):
screen[column][row] = 0
def toggle(screen, x_1, y_1, x_2, y_2):
for row in range(x_1, x_2):
for column in range(y_1, y_2):
if screen[column][row] == 0:
screen[column][row] = 1
elif screen[column][row] == 1:
screen[column][row] = 0
else:
raise NotImplementedError(screen[column][row])
def parse_instruction(instruction):
x_1, y_1, x_2, y_2 = re.search(r'(\d+),(\d+) through (\d+),(\d+)', instruction).groups()
if instruction.startswith('turn on'):
return turn_on, int(x_1), int(y_1), int(x_2) + 1, int(y_2) + 1
elif instruction.startswith('turn off'):
return turn_off, int(x_1), int(y_1), int(x_2) + 1, int(y_2) + 1
elif instruction.startswith('toggle'):
return toggle, int(x_1), int(y_1), int(x_2) + 1, int(y_2) + 1
else:
raise NotImplementedError
def deploy_lights(instruction_list):
screen = [[0 for _ in range(1000)] for _ in range(1000)]
# process instructions and apply functions
for instruction in instruction_list:
_function, x_1, y_1, x_2, y_2 = parse_instruction(instruction)
_function(screen, x_1, y_1, x_2, y_2)
# count the lights
lights_count = 0
for row in screen:
lights_count += row.count(1)
# draw the image to satisfy my curiosity
numpy_array = numpy.array([numpy.array(row) for row in screen])
plt.imsave('image_1.png', numpy_array, cmap='Greys')
return lights_count
def main():
answer = deploy_lights(input_data)
print('answer:', answer)
if __name__ == '__main__':
main()
| [
"urban.pl@gmail.com"
] | urban.pl@gmail.com |
04224bb729241e830197f141c5352092b82bd014 | 7ea93ebddf0eb742fd8d499d5bd9ce68bc5aadd5 | /app/__init__.py | 45ff7e8597f8c6e24fa86fb74eb0d3b98ae2f130 | [] | no_license | wma8/websitess | f41ebbaeb359ce40acce16b2ebdc976a57c39e6c | 76b82b61b34d6b5704920e05e017007a4166ec57 | refs/heads/master | 2023-02-09T09:18:43.321640 | 2019-06-26T11:06:55 | 2019-06-26T11:06:55 | 193,888,681 | 0 | 0 | null | 2023-02-02T06:32:51 | 2019-06-26T11:09:05 | Python | UTF-8 | Python | false | false | 479 | py | from flask_sqlalchemy import SQLAlchemy
from flask_bootstrap import Bootstrap
from config import config
from flask import Flask
import pymysql
pymysql.install_as_MySQLdb()
db = SQLAlchemy()
def create_app(config_name):
    """Application factory: build a Flask app for the given config profile.

    Args:
        config_name: key into the ``config`` dict selecting the profile.

    Returns:
        The configured Flask application with the main blueprint registered.
    """
    app = Flask(__name__)
    Bootstrap(app)  # enable Flask-Bootstrap templates/resources
    app.config.from_object(config[config_name])
    config[config_name].init_app(app)
    db.init_app(app)  # bind the module-level SQLAlchemy instance
    # Imported here, not at module top — presumably to avoid a circular
    # import with the blueprint module; confirm before moving.
    from .main import main as main_blueprint
    app.register_blueprint(main_blueprint)
    return app
| [
"wma8@ncsu.edu"
] | wma8@ncsu.edu |
62d8a1e1ebc3c8d291c1aac3ad32a57ed5088219 | 7ac1f3e38dab2899d6dc0d02cc1ace3934fb0805 | /IR/text codes/boolRet.txt | 3c3a38f9c6bb1ee3abedbd677eccc255f149a349 | [] | no_license | amanbhal/pythonCodes | 3fd9357211fe7d06c6972e7a4f469df1ff3cf60a | 49d17ce395d15e7c8497af8455790ecb876a0d49 | refs/heads/master | 2016-08-12T06:12:19.108863 | 2015-11-16T20:42:11 | 2015-11-16T20:42:11 | 46,301,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,699 | txt | #! /usr/bin/python
from helpers import getStopList, getFileList, growDict, printDict
def main():
    """Index the sample documents, read a boolean query, and evaluate it."""
    stopLst = getStopList("../Files/")
    [fList, tPath] = getFileList("../Files/","sampleDocs/")
    tokDict = {}
    # Grow the token dictionary one document at a time.
    for f in fList:
        tokDict = growDict(tokDict, f, fList.index(f), tPath,
                           stopLst, len(fList), 1)
    query = queryInput()
    #printDict(tokDict, 1)
    print query
    boolSearch(tokDict, query, fList)
def boolSearch(tokDict, query, fList):
    """Evaluate an infix boolean query (terms joined by 'and'/'or').

    Runs of 'and'-ed terms are intersected into ``parRes``; each 'or'
    (and the end of the query) unions ``parRes`` into ``res``.  Prints
    every completed partial result and finally the sorted union.
    """
    qTokens = query.split()
    qLen = len(qTokens)
    res = []            # union of all completed 'and'-groups
    count = 0           # index of the next query token to consume
    parRes = fList[:]   # files matching the current 'and'-group so far
    while True:
        term = qTokens[count]
        if not term in tokDict:
            parRes = []  # unknown term matches no document
        # Intersect: drop files whose index entry for `term` is falsy.
        tempRes = parRes[:]
        for f in parRes:
            ind = fList.index(f)
            if not tokDict[term][ind]:
                tempRes.remove(f)
        parRes = tempRes[:]
        if count == (qLen - 1):
            # Last term: flush the final group and stop.
            print parRes
            res = list( set(res) | set(parRes) )
            break
        count += 1
        op = qTokens[count]
        count += 1
        if op == 'or':
            # 'or' ends the current group: union it and start fresh.
            print parRes
            res = list( set(res) | set(parRes) )
            parRes = fList[:]
    print sorted(res)
def queryInput():
    """Interactively build a boolean query string like 'a and b or c'.

    Only purely alphabetic or purely numeric terms are accepted;
    answering 3 after a term terminates the query.
    """
    cont = 1
    query = ''
    while cont != 3:
        term = raw_input("Term\n-> ")
        # Reject terms that are neither all letters nor all digits.
        if ( not term.isalpha() ) and ( not term.isdigit() ):
            continue
        query += term
        cont = raw_input("And : 1, Or : 2, End : 3\n-> ")
        if int(cont) == 1:
            query += ' and '
        elif int(cont) == 2:
            query += ' or '
        else:
            cont = int(cont)  # any other answer ends the loop (e.g. 3)
    return query
if __name__ == "__main__":
main()
| [
"amandeep.bhal92@gmail.com"
] | amandeep.bhal92@gmail.com |
8b911d329c8bec2537e8276d843ea45bea74e467 | ef187d259d33e97c7b9ed07dfbf065cec3e41f59 | /work/atcoder/abc/abc023/C/answers/112335_Gale.py | 931904830969cd7b6dfccc1f26e1805b2573aa71 | [] | no_license | kjnh10/pcw | 847f7295ea3174490485ffe14ce4cdea0931c032 | 8f677701bce15517fb9362cc5b596644da62dca8 | refs/heads/master | 2020-03-18T09:54:23.442772 | 2018-07-19T00:26:09 | 2018-07-19T00:26:09 | 134,586,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 583 | py | ame = set()
# Read grid size (r rows, c columns) and the target total k.
r, c, k = map(int, input().split())
row_n = [0] * (r + 1)   # row_n[i]: number of items placed in row i
col_n = [0] * (c + 1)   # col_n[j]: number of items placed in column j
row_nn = [0] * 100001   # row_nn[x]: how many rows contain exactly x items
col_nn = [0] * 100001   # col_nn[x]: how many columns contain exactly x items
n = int(input())
for _ in range(n):
    rr, cc = map(int, input().split())
    row_n[rr] += 1
    col_n[cc] += 1
    ame.add((rr, cc))   # remember occupied cells (ame defined above)
for i in range(1, r + 1):
    row_nn[row_n[i]] += 1
for i in range(1, c + 1):
    col_nn[col_n[i]] += 1
# Count (row, column) pairs whose per-line item counts sum to k ...
ans = 0
for i in range(k + 1):
    ans += row_nn[i] * col_nn[k - i]
# ... then correct for occupied crossing cells: the item at (rr, cc) is
# counted in both its row and its column total.
for rr, cc in ame:
    num = row_n[rr] + col_n[cc]
    if num == k:
        ans -= 1        # true count is k - 1, so this pair was overcounted
    if num == k + 1:
        ans += 1        # true count is (k + 1) - 1 = k, previously missed
print(ans)
| [
"kojinho10@gmail.com"
] | kojinho10@gmail.com |
87e43a21fec53e30d2d5f8bebe82dd4feb5829bd | 32c4a3f8893a7fe9039ebfb2d98215e06203f8f2 | /tests/tensor/coordinate/system/axis/test__axis.py | c8aad570b9de1b5b0b45762a0dd50d4202c819ce | [
"Apache-2.0"
] | permissive | jedhsu/tensor | d27c8951aa32208e3c5bbcef0d0f2bae56f8a670 | 3b2fe21029fa7c50b034190e77d79d1a94ea5e8f | refs/heads/main | 2023-07-06T11:35:36.307538 | 2021-08-07T20:11:19 | 2021-08-07T20:11:19 | 366,904,212 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | """
*Axis, [Unit Tests]*
"""
from tensor.tensor.coordinate.system.axis._axis import Axis
class TestAxis:
    """Unit tests for the Axis value object."""

    def test_init(self):
        """Direct construction stores ordinal, origin and direction."""
        a = Axis(5, 0, 1)
        assert isinstance(a, Axis)
        assert a.ordinal == 5
        assert a.origin == 0
        assert a.direction == 1

    def test_create(self):
        """The create() factory defaults origin to 0 and direction to 1."""
        a = Axis.create(5)
        assert a.origin == 0
        assert a.direction == 1
| [
"jed910@gmail.com"
] | jed910@gmail.com |
ca35767c9da032c4f339de5aa5a46487243fc984 | c8a04384030c3af88a8e16de4cedc4ef8aebfae5 | /stubs/pandas/compat/__init__.pyi | 3d2128b4dcb1f4be3619e7a503336c8e387ce6bb | [
"MIT"
] | permissive | Accern/accern-xyme | f61fce4b426262b4f67c722e563bb4297cfc4235 | 6ed6c52671d02745efabe7e6b8bdf0ad21f8762c | refs/heads/master | 2023-08-17T04:29:00.904122 | 2023-05-23T09:18:09 | 2023-05-23T09:18:09 | 226,960,272 | 3 | 2 | MIT | 2023-07-19T02:13:18 | 2019-12-09T20:21:59 | Python | UTF-8 | Python | false | false | 803 | pyi | # Stubs for pandas.compat (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
# pylint: disable=unused-argument,redefined-outer-name,no-self-use,invalid-name
# pylint: disable=relative-beyond-top-level,line-too-long,arguments-differ
# pylint: disable=no-member,too-few-public-methods,keyword-arg-before-vararg
# pylint: disable=super-init-not-called,abstract-method,redefined-builtin
from typing import Any
# Interpreter/version flags exposed by pandas.compat (left untyped as Any
# by the stub generator; at runtime these are presumably bools — confirm
# against the pandas source).
PY36: Any
PY37: Any
PYPY: Any

def set_function_name(f: Any, name: Any, cls: Any) -> Any:
    ...

def raise_with_traceback(exc: Any, traceback: Any = ...) -> None:
    ...

# Platform predicates; return values are untyped in this stub.
def is_platform_little_endian():
    ...

def is_platform_windows():
    ...

def is_platform_linux():
    ...

def is_platform_mac():
    ...

def is_platform_32bit():
    ...
| [
"josua.krause@gmail.com"
] | josua.krause@gmail.com |
50610a3f906a7a87156de10f1d4f14ee940cbcb2 | d532b85841b459c61d88d380e88dd08d29836d43 | /solutions/959_regions_cut_by_slashes.py | b72a08453f9c8c421f6efe61aa9d4386f67a0fa4 | [
"MIT"
] | permissive | YiqunPeng/leetcode_pro | ad942468df5506de9dc48a4019933f658e2a3121 | 4a508a982b125a3a90ea893ae70863df7c99cc70 | refs/heads/master | 2022-05-15T09:32:02.699180 | 2022-05-14T16:32:17 | 2022-05-14T16:32:17 | 182,453,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,711 | py | class Solution:
    def regionsBySlashes(self, grid: List[str]) -> int:
        """Count the regions an n x n grid of '/', '\\' and ' ' is cut into.

        Each cell is modelled as two half-cells (the third tuple element,
        0 or 1; which geometric half each value denotes depends on the
        cell's character — see _dfs).  Every flood fill started from an
        unvisited half-cell discovers exactly one region.
        """
        n = len(grid)
        seen = set()  # visited (row, col, half) triples
        res = 0
        for i in range(n):
            for j in range(n):
                if (i, j, 0) not in seen:
                    self._dfs(grid, i, j, 0, seen)
                    res += 1
                if (i, j, 1) not in seen:
                    self._dfs(grid, i, j, 1, seen)
                    res += 1
        return res
    def _dfs(self, grid, i, j, a, seen):
        """Flood-fill from half-cell (i, j, a), marking all reachable halves.

        ``a`` selects one of the two halves of cell (i, j).  Which
        neighbouring half-cells it connects to depends on this cell's
        character (' ', '\\' or '/') and, for vertical neighbours, on the
        neighbouring cell's character as well (the neighbour's character
        decides which of its halves touches the shared edge).
        """
        if (i, j, a) in seen:
            return
        n = len(grid)
        seen.add((i, j, a))
        if grid[i][j] == ' ':
            # Blank cell: both halves are connected to each other and the
            # cell reaches all four neighbours.
            if a == 0:
                self._dfs(grid, i, j, 1, seen)
                if i > 0:
                    if grid[i-1][j] in [' ', '/']:
                        self._dfs(grid, i-1, j, 1, seen)
                    else:
                        self._dfs(grid, i-1, j, 0, seen)
                if j > 0:
                    self._dfs(grid, i, j-1, 1, seen)
            else:
                self._dfs(grid, i, j, 0, seen)
                if j + 1 < n:
                    self._dfs(grid, i, j+1, 0, seen)
                if i + 1 < n:
                    if grid[i+1][j] in ['/', ' ']:
                        self._dfs(grid, i+1, j, 0, seen)
                    else:
                        self._dfs(grid, i+1, j, 1, seen)
        elif grid[i][j] == '\\':
            # Backslash cell: the two halves are separated; each reaches
            # one horizontal and one vertical neighbour.
            if a == 0:
                if j > 0:
                    self._dfs(grid, i, j-1, 1, seen)
                if i + 1 < n:
                    if grid[i+1][j] in ['/', ' ']:
                        self._dfs(grid, i+1, j, 0, seen)
                    else:
                        self._dfs(grid, i+1, j, 1, seen)
            else:
                if j + 1 < n:
                    self._dfs(grid, i, j+1, 0, seen)
                if i > 0:
                    if grid[i-1][j] in [' ', '/']:
                        self._dfs(grid, i-1, j, 1, seen)
                    else:
                        self._dfs(grid, i-1, j, 0, seen)
        else:
            # Slash ('/') cell: halves are separated with the mirrored
            # neighbour connectivity of the backslash case.
            if a == 0:
                if i > 0:
                    if grid[i-1][j] in [' ', '/']:
                        self._dfs(grid, i-1, j, 1, seen)
                    else:
                        self._dfs(grid, i-1, j, 0, seen)
                if j > 0:
                    self._dfs(grid, i, j-1, 1, seen)
            else:
                if j + 1 < n:
                    self._dfs(grid, i, j+1, 0, seen)
                if i + 1 < n:
                    if grid[i+1][j] in ['/', ' ']:
                        self._dfs(grid, i+1, j, 0, seen)
                    else:
                        self._dfs(grid, i+1, j, 1, seen)
"ypeng1@andrew.cmu.edu"
] | ypeng1@andrew.cmu.edu |
df50b05cf5f33bf54acad8204d9968987e7e4ba3 | d51b8b1b55bbcdea55d6ab2b0a97c03cd290868d | /revivalkit/log.py | b03be3472a8d2de8da12f422fc34225da06ed095 | [] | no_license | gogobook/revivalkit | 2c48f9a032b159589a1e8f5b515a22b7fc4e0123 | 3f1eccc6bd80a2c0c4ad7ab60491c98a8d9cd632 | refs/heads/master | 2021-01-16T18:07:53.065671 | 2015-12-12T07:40:32 | 2015-12-12T07:40:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 202 | py | from __future__ import print_function
import sys
to_print_debug = False
def debug(*args, **arg_ds):
if not to_print_debug: return
print('revivalkit:debug:', *args, file=sys.stderr, **arg_ds)
| [
"mosky.tw@gmail.com"
] | mosky.tw@gmail.com |
1ca5712af3da706bb53d3661f958c30321305c1f | 2fe58e7f6bfc3efdb78ca56f72a4e2a75a24c270 | /eric/eric6/Plugins/__init__.py | fe0b347045e010e41b7ae1988a28bc90b6118bc1 | [] | no_license | testerclub/eric6-20.3 | 3053e0e6962060b213f5df329ee331a4893d18e6 | bba0b9f13fa3eb84938422732d751219bc3e29e2 | refs/heads/master | 2023-03-18T08:24:03.472297 | 2020-03-14T06:44:14 | 2020-03-14T06:44:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 145 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2007 - 2020 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Package containing all core plugins.
"""
| [
"skeptycal@gmail.com"
] | skeptycal@gmail.com |
fe6f4c64becb61733511e9dd29f3fa33cfdeb957 | aa13e1d93b7a8017e1e610a900bd05f6df91604f | /hackerrank/contests/hourrank7/array-splitting.py | 3e61e19ff22b66ac69f49c320ebb26f4cad2c1db | [] | no_license | laveesingh/Competitive-Programming | 3ce3272eab525635f9ce400f7467ee09de8b51df | 41047f47c23bc8572a1891e891a03cc3f751e588 | refs/heads/master | 2021-01-24T09:51:00.332543 | 2017-10-30T17:11:48 | 2017-10-30T17:11:48 | 52,956,650 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 770 | py | from pprint import pprint
ans = 0
# dict[tuple] = (divided tuple of list), value
def isPos(a, n):
    """Find an equal-sum split point of list ``a``.

    Returns (left_part, right_part, n + 1) for the first prefix whose sum
    equals the sum of the remaining suffix, or False when no such split
    exists.  ``n`` is the current split depth, passed through incremented.
    """
    s = 0           # running prefix sum
    s2 = sum(a)     # running suffix sum
    for i in xrange(len(a)-1):
        s += a[i]
        s2 -= a[i]
        if s == s2 and i != len(a)-1:
            return (a[:i+1],a[i+1:], n+1)
    return False
# store maps tuple(sub-array) -> (left_part, right_part, depth) for every
# successful split found during the recursion.
store = {}

def solve(a, n):
    """Recursively split ``a`` wherever an equal-sum split exists.

    Each successful split is recorded in the global ``store`` together
    with its depth; both halves are then split further.
    """
    x = isPos(a, n)
    if x:
        store[tuple(a)] = x
        a1 = x[0]
        a2 = x[1]
        solve(a1, n+1)
        solve(a2, n+1)
# One test case per line pair: array length (unused directly) then values.
# The answer is the maximum split depth recorded across all splits.
for _ in xrange(input()):
    n = input()
    a = map(int, raw_input().split())
    store = {}  # reset the global split record for this test case
    solve(a, 0)
    ans = 0
    for s in store:
        if store[s][2] > ans:
            ans = store[s][2]
    print ans
# def case():
# n = random.randint(1,9)
# print n
# for _ in xrange(n):
# l = random.randint(1,20)
# a = [random.randint(1,8) for i in xrange(l)]
# print l
# for s in a: print s,
# print | [
"laveechaudharytheone@gmail.com"
] | laveechaudharytheone@gmail.com |
0be8c0f0c2fd334cf4240d98ea194ea813adee91 | 2387caf918fa9109568f3f804377c409f7b40fe8 | /distance3d/hydroelastic_contact/_halfplanes.py | d43f8b1483633f57ef2fec53122454584ef197c4 | [
"Zlib",
"MIT",
"BSD-3-Clause",
"BSD-3-Clause-Clear",
"BSL-1.0",
"Unlicense"
] | permissive | AlexanderFabisch/distance3d | aed80c3c4f556f832a44c3b674760db20ef92f2d | 7b2098161a57253c68d3725d63ea235831d272eb | refs/heads/master | 2023-08-19T06:56:30.725164 | 2023-08-03T16:08:51 | 2023-08-03T16:08:51 | 476,051,225 | 30 | 5 | NOASSERTION | 2023-07-28T08:07:23 | 2022-03-30T21:05:48 | Python | UTF-8 | Python | false | false | 3,624 | py | import numba
import numpy as np
from ..utils import norm_vector, EPSILON
# replaces from numba.np.extensions import cross2d, which seems to have a bug
# when called with NUMBA_DISABLE_JIT=1
@numba.njit(
    numba.float64(numba.float64[::1], numba.float64[::1]),
    cache=True)
def cross2d(a, b):
    """Return the scalar 2D cross product (z-component) of vectors a and b."""
    return a[0] * b[1] - a[1] * b[0]
@numba.njit(
    numba.float64[::1](numba.float64[::1], numba.float64[::1]),
    cache=True)
def intersect_two_halfplanes(halfplane1, halfplane2):
    """Intersect the boundary lines of two halfplanes.

    Each halfplane is (px, py, dx, dy): a point p and a direction pq.
    Returns the intersection point, or an empty array when the two
    direction vectors are (numerically) parallel.
    """
    denom = cross2d(halfplane1[2:], halfplane2[2:])
    if abs(denom) < EPSILON:
        # Parallel boundary lines: no unique intersection point.
        return np.empty(0, dtype=np.dtype("float"))
    # Parameter t along halfplane1's line such that p1 + t * d1 is on line 2.
    t = cross2d((halfplane2[:2] - halfplane1[:2]), halfplane2[2:]) / denom
    return halfplane1[:2] + halfplane1[2:] * t
@numba.njit(
    numba.bool_(numba.float64[::1], numba.float64[::1]),
    cache=True)
def point_outside_of_halfplane(halfplane, point):
    """Return True when `point` lies strictly on the outside of `halfplane`.

    "Outside" means the cross product of the halfplane direction with the
    vector from its base point to `point` is below -EPSILON.
    """
    return cross2d(halfplane[2:], point - halfplane[:2]) < -EPSILON
@numba.njit(
    numba.float64[:, :](numba.float64[:, ::1]), cache=True)
def intersect_halfplanes(halfplanes):
    """Find polygon points by halfplane intersection.

    Brute force: intersect every pair of halfplane boundaries and keep the
    points not outside any other halfplane — O(n^3) in the number of
    halfplanes.

    Parameters
    ----------
    halfplanes : array, shape (n_halfplanes, 4)
        Halfplanes in contact plane. Each halfplane is defined by a point
        p and a direction pq.

    Returns
    -------
    points : array, shape (n_points, 2)
        Points of the polygon (may contain duplicates).
    """
    # reserve more space than required, there might be duplicates
    points = np.empty((3 * len(halfplanes), 2))
    n_intersections = 0
    for i in range(len(halfplanes)):
        for j in range(i + 1, len(halfplanes)):
            p = intersect_two_halfplanes(halfplanes[i], halfplanes[j])
            if len(p) == 0: # parallel halfplanes
                continue
            # Keep p only if it is inside (or on) every other halfplane.
            valid = True
            for k in range(len(halfplanes)):
                if k != i and k != j and point_outside_of_halfplane(
                        halfplanes[k], p):
                    valid = False
                    break
            if valid:
                points[n_intersections] = p
                n_intersections += 1
                # Guard against overflowing the pre-allocated buffer.
                assert n_intersections < len(points)
    return points[:n_intersections]
def plot_halfplanes_and_intersections(halfplanes, points=None, xlim=None, ylim=None): # pragma: no cover
    """Debug helper: plot halfplanes and (optionally) their intersection points.

    The plot scale is derived from the spread of `points` when given,
    otherwise a unit scale is used.
    """
    import matplotlib.pyplot as plt
    if points is None:
        scale = 1.0
    else:
        # Scale the drawing to 10x the largest distance from the centroid.
        center = np.mean(points, axis=0)
        max_distance = max(np.linalg.norm(points - center, axis=1))
        scale = 10.0 * max_distance
    plt.figure()
    ax = plt.subplot(111, aspect="equal")
    for i, halfplane in enumerate(halfplanes):
        # First half of the halfplanes in red, second half in blue.
        c = "r" if i < len(halfplanes) // 2 else "b"
        plot_halfplane(halfplane, ax, c, 0.5, scale)
    if points is not None:
        # One distinct color per point, padding with black when there are
        # more points than colors.
        colors = ["r", "g", "b", "orange", "magenta", "brown", "k"][:len(points)]
        if len(colors) < len(points):
            colors.extend(["k"] * (len(points) - len(colors)))
        plt.scatter(points[:, 0], points[:, 1], c=colors, s=100)
    if xlim is not None:
        plt.xlim(xlim)
    if ylim is not None:
        plt.ylim(ylim)
    plt.show()
def plot_halfplane(halfplane, ax, c, alpha, scale): # pragma: no cover
    """Draw one halfplane: its boundary line plus short normal ticks.

    The ticks point toward the halfplane's normal side and are drawn at
    every 10th sample along the boundary.
    """
    line = (halfplane[:2] + np.linspace(-scale, scale, 101)[:, np.newaxis]
            * norm_vector(halfplane[2:]))
    ax.plot(line[:, 0], line[:, 1], lw=3, c=c, alpha=alpha)
    # Perpendicular of the direction (dx, dy) -> (-dy, dx).
    normal2d = np.array([-halfplane[3], halfplane[2]])
    for p in line[::10]:
        normal = (p + np.linspace(0.0, 0.1 * scale, 101)[:, np.newaxis]
                  * norm_vector(normal2d))
        ax.plot(normal[:, 0], normal[:, 1], c=c, alpha=0.5 * alpha)
| [
"afabisch@googlemail.com"
] | afabisch@googlemail.com |
f6f0da799baa0c953facd5a352662624b46d44c9 | f9e1d9c71d232aa0bcf03627259e6c9f88538b18 | /gs108ExtraMethodsOfClassBasedView/gs108/asgi.py | 2cd325422ba75b65fd77dbbeaf2e0635a1336874 | [] | no_license | nayan-gujju/Django-Practice | a7db202b6a3627a6a4e9f96953b61e43eaf68cb1 | eafa29e9321a1683867b2ea1d26ca74dfa6db12d | refs/heads/master | 2023-07-27T11:41:43.956705 | 2021-09-09T08:47:44 | 2021-09-09T08:47:44 | 403,917,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | """
ASGI config for gs108 project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os

from django.core.asgi import get_asgi_application

# Ensure the settings module is configured before building the application;
# setdefault keeps any value already set in the environment.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gs108.settings')

application = get_asgi_application()
| [
"nayangujarati007@gmail.com"
] | nayangujarati007@gmail.com |
7947c7a7858c6399fa83aeee2c2115a32a62c5f5 | c6e5d5ff2ee796fd42d7895edd86a49144998067 | /platform/polycommon/polycommon/live_state.py | 2f5680bf039683418e6233de246bbf5e217f1810 | [
"Apache-2.0"
] | permissive | zeyaddeeb/polyaxon | f4481059f93d8b70fb3d41840a244cd9aaa871e0 | 1f2b236f3ef36cf2aec4ad9ec78520dcc9ef4ee5 | refs/heads/master | 2023-01-19T05:15:34.334784 | 2020-11-27T17:08:35 | 2020-11-27T17:08:35 | 297,410,504 | 0 | 0 | Apache-2.0 | 2020-09-21T17:20:27 | 2020-09-21T17:20:26 | null | UTF-8 | Python | false | false | 198 | py | STATE_LIVE = 1
STATE_ARCHIVED = 0
STATE_DELETION_PROGRESSING = -1
CHOICES = (
(STATE_LIVE, "live"),
(STATE_ARCHIVED, "archived"),
(STATE_DELETION_PROGRESSING, "deletion_progressing"),
)
| [
"contact@polyaxon.com"
] | contact@polyaxon.com |
55a04939f3799c5645a196ee9769032e5a0efd68 | 7bededcada9271d92f34da6dae7088f3faf61c02 | /pypureclient/flasharray/FA_2_21/models/local_group_membership_post.py | fcc7518cae6098afbd8512b4e37e409abe4850f1 | [
"BSD-2-Clause"
] | permissive | PureStorage-OpenConnect/py-pure-client | a5348c6a153f8c809d6e3cf734d95d6946c5f659 | 7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e | refs/heads/master | 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 | BSD-2-Clause | 2023-09-08T09:08:30 | 2018-12-04T17:02:51 | Python | UTF-8 | Python | false | false | 3,982 | py | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.21
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_21 import models
class LocalGroupMembershipPost(object):
    """Auto-generated Swagger model for a local-group membership POST body.

    Attributes:
        swagger_types (dict): The key is attribute name
                              and the value is attribute type.
        attribute_map (dict): The key is attribute name
                              and the value is json key in definition.
    """
    swagger_types = {
        'members': 'list[LocalgroupmembershippostMembers]'
    }
    attribute_map = {
        'members': 'members'
    }
    required_args = {
    }
    def __init__(
        self,
        members=None,  # type: List[models.LocalgroupmembershippostMembers]
    ):
        """
        Keyword args:
            members (list[LocalgroupmembershippostMembers]): A list of resources to be a member of the group.
        """
        # Only assign when provided so an unset field stays absent from
        # __dict__ (and therefore from to_dict()).
        if members is not None:
            self.members = members
    def __setattr__(self, key, value):
        # Reject attributes that are not part of the Swagger definition.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `LocalGroupMembershipPost`".format(key))
        self.__dict__[key] = value
    def __getattribute__(self, item):
        # Property sentinel values behave as "unset": accessing them raises
        # AttributeError so hasattr() reports the field as missing.
        value = object.__getattribute__(self, item)
        if isinstance(value, Property):
            raise AttributeError
        else:
            return value
    def __getitem__(self, key):
        # Dict-style read access limited to declared Swagger fields.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `LocalGroupMembershipPost`".format(key))
        return object.__getattribute__(self, key)
    def __setitem__(self, key, value):
        # Dict-style write access limited to declared Swagger fields.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `LocalGroupMembershipPost`".format(key))
        object.__setattr__(self, key, value)
    def __delitem__(self, key):
        # Dict-style delete limited to declared Swagger fields.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `LocalGroupMembershipPost`".format(key))
        object.__delattr__(self, key)
    def keys(self):
        return self.attribute_map.keys()
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            # hasattr is False for unset fields (see __getattribute__).
            if hasattr(self, attr):
                value = getattr(self, attr)
                if isinstance(value, list):
                    # Recursively serialize nested model objects in lists.
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    # Recursively serialize nested model objects in dict values.
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        # Generated boilerplate: only relevant if the model subclassed dict.
        if issubclass(LocalGroupMembershipPost, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, LocalGroupMembershipPost):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"noreply@github.com"
] | PureStorage-OpenConnect.noreply@github.com |
1e6853ac04cd59b0da08c492ad82eb7292a0b5e7 | 455706c02fb0a5dfcb29572779c2dde34ecb3c1c | /django_oopviews/base.py | 6f018dc6ed93cbd70df1ae51cd6070e7aca285df | [
"BSD-3-Clause"
] | permissive | zerok/django-oopviews | 581293aaab673559186e2e570f37bc4eea8d39ea | 8b80cae437b6089310ae12dd76532624c84db18b | refs/heads/master | 2016-09-05T13:16:34.013936 | 2008-11-11T00:46:35 | 2008-11-11T00:46:35 | 57,663 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,776 | py | """
In some instances you end up producing tons of views that actually do mostly
the same except for perhaps one or two lines. This module offers you a simple
alternative::
from django_oopviews import create_view, BaseView
class View1(BaseView):
def __init__(self, request, *args, **kwargs):
# Here you have your common code
self.my_variable = 1
def __call__(self, request, *args, **kwargs):
whatever = self.my_variable + 1
return HttpResponse(whatever)
class View2(View1):
def __call__(self, request, *args, **kwargs):
return HttpResponse(self.my_variable)
view1 = create_view(View1)
view2 = create_view(View2)
In this example, the code in ``View1.__init__`` is shared between View1 and
View2, so you don't need to write it again.
If you want to share some HttpResponse post-processing, implement the
``BaseView.__after__(self, response_obj)`` method
For more details check out this `blog post`_
.. _blog post: http://zerokspot.com/weblog/1037/
"""
__all__ = ('create_view', 'BaseView', )
def create_view(klass):
"""
This is the generator function for your view. Simply pass it the class
of your view implementation (ideally a subclass of BaseView or at least
duck-type-compatible) and it will give you a function that you can
add to your urlconf.
"""
def _func(request, *args, **kwargs):
"""
Constructed function that actually creates and executes your view
instance.
"""
view_instance = klass(request, *args, **kwargs)
response = view_instance(request, *args, **kwargs)
after = getattr(view_instance, '__after__', None)
if after is None:
return response
else:
return view_instance.__after__(response)
setattr(_func, '_class', klass)
return _func
class BaseView(object):
    """
    The Base-class for OOPViews. Inherit it and overwrite the __init__,
    __call__ and/or __after__ methods.
    """
    def __init__(self, request, *args, **kwargs):
        """
        In the constructor you can easily aggregate common functionality
        shared by several views.
        """
        pass
    def __call__(self, request, *args, **kwargs):
        """
        This is the method where you want to put the part of your code that
        is absolutely view-specific. Subclasses must override it.
        """
        # NOTE: Python 2 `raise Class, message` syntax — this module targets
        # Python 2 and is a SyntaxError under Python 3.
        raise RuntimeError, "You have to override BaseView's __call__ method"
    def __after__(self, response):
        """
        Shared response post-processing hook; called by create_view with the
        response returned from __call__. Default is the identity.
        """
        return response
| [
"zerok@zerokspot.com"
] | zerok@zerokspot.com |
67ceade67a7d9a435d33fe714ae6051a373d2f92 | e86dedc5b0bb79b9eba41e74c343e77bd1ee1512 | /lldb/test/API/commands/expression/import-std-module/sysroot/TestStdModuleSysroot.py | 014a35458d66fa1a5fc78a34595ca0b20de85127 | [
"NCSA",
"LLVM-exception",
"Apache-2.0"
] | permissive | shafik/llvm-project | a5e1b66fb053f9aa01720a40ea7985b4cc57d16f | be556c838de06c3c2f69bf594996cace6ffa17eb | refs/heads/main | 2023-05-28T22:35:12.937142 | 2023-05-16T18:22:53 | 2023-05-16T18:25:41 | 221,325,771 | 0 | 0 | Apache-2.0 | 2019-11-12T22:40:44 | 2019-11-12T22:40:44 | null | UTF-8 | Python | false | false | 1,493 | py | """
Test that we respect the sysroot when building the std module.
"""
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
import os
class ImportStdModule(TestBase):
    # We only emulate a fake libc++ in this test and don't use the real libc++,
    # but we still add the libc++ category so that this test is only run in
    # test configurations where libc++ is actually supposed to be tested.
    @add_test_categories(["libc++"])
    @skipIf(compiler=no_match("clang"))
    @skipIfRemote # This test messes with the platform, can't be run remotely.
    def test(self):
        """Build, point the platform sysroot at our fake root, and verify
        that expression evaluation imports the std module from it."""
        self.build()
        sysroot = os.path.join(os.getcwd(), "root")
        # Set the sysroot.
        self.runCmd("platform select --sysroot '" + sysroot + "' host",
                    CURRENT_EXECUTABLE_SET)
        lldbutil.run_to_source_breakpoint(self,
                                          "// Set break point at this line.",
                                          lldb.SBFileSpec("main.cpp"))
        self.runCmd("settings set target.import-std-module true")
        # Call our custom function in our sysroot std module.
        # If this gives us the correct result, then we used the sysroot.
        # We rely on the default argument of -123 to make sure we actually have the C++ module.
        # (We don't have default arguments in the debug information).
        self.expect("expr std::myabs()", substrs=['(int) $0 = 123'])
"teemperor@gmail.com"
] | teemperor@gmail.com |
6a3c75482f7f16ad0223ab79c872be430da13d6f | cdaeb2c9bbb949b817f9139db2d18120c70f1694 | /rakam_client/models/error_message.py | db883d4928c04f945e167febf9fbc9a5b5a2119b | [
"Apache-2.0"
] | permissive | sreekanthpulagam/rakam-python-client | 665c984ac7a29b57ead6feaeb99a69ba345220e6 | 8bd843208b03726d6ce89ee343b48b889b576e0e | refs/heads/master | 2021-01-24T15:42:36.374366 | 2016-07-19T21:49:26 | 2016-07-19T21:49:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,002 | py | # coding: utf-8
"""
Rakam API Documentation
An analytics platform API that lets you create your own analytics services.
OpenAPI spec version: 0.5
Contact: contact@rakam.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class ErrorMessage(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, error=None, error_code=None):
        """
        ErrorMessage - a model defined in Swagger
        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        self.swagger_types = {
            'error': 'str',
            'error_code': 'int'
        }
        self.attribute_map = {
            'error': 'error',
            'error_code': 'error_code'
        }
        self._error = error
        self._error_code = error_code
    @property
    def error(self):
        """
        Gets the error of this ErrorMessage.

        :return: The error of this ErrorMessage.
        :rtype: str
        """
        return self._error
    @error.setter
    def error(self, error):
        """
        Sets the error of this ErrorMessage.

        :param error: The error of this ErrorMessage.
        :type: str
        """
        self._error = error
    @property
    def error_code(self):
        """
        Gets the error_code of this ErrorMessage.

        :return: The error_code of this ErrorMessage.
        :rtype: int
        """
        return self._error_code
    @error_code.setter
    def error_code(self, error_code):
        """
        Sets the error_code of this ErrorMessage.

        :param error_code: The error_code of this ErrorMessage.
        :type: int
        """
        self._error_code = error_code
    def to_dict(self):
        """
        Returns the model properties as a dict, recursively serializing any
        nested model objects that expose to_dict().
        """
        result = {}
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # NOTE(review): no isinstance check — comparing against an object
        # without __dict__ raises AttributeError rather than returning False.
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| [
"emrekabakci@gmail.com"
] | emrekabakci@gmail.com |
f44b857fddcb103a891ca98241641c61f9c04692 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_188/ch73_2019_04_04_18_34_54_400262.py | cd8882d0dc247c4bb07246006762dc9e2db0cd04 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | def remove_vogais(frase):
frase = str(frase)
frase = frase.lower()
vogais = ["a", "e", "i", "o", "u"]
contador = 0
sem_vogais = ""
while contador < len(frase):
if vogais not in frase[contador]:
sem_vogais += frase[contador]
contador += 1
return sem_vogais | [
"you@example.com"
] | you@example.com |
f8ac40843de7f398c6de044fef0cb2f7be52b6fa | 1dacbf90eeb384455ab84a8cf63d16e2c9680a90 | /pkgs/bokeh-0.11.1-py27_0/lib/python2.7/site-packages/bokeh/server/protocol/receiver.py | 9d0143575ad9d50cb757456cb0c4061dbac5d25d | [
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] | permissive | wangyum/Anaconda | ac7229b21815dd92b0bd1c8b7ec4e85c013b8994 | 2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6 | refs/heads/master | 2022-10-21T15:14:23.464126 | 2022-10-05T12:10:31 | 2022-10-05T12:10:31 | 76,526,728 | 11 | 10 | Apache-2.0 | 2022-10-05T12:10:32 | 2016-12-15T05:26:12 | Python | UTF-8 | Python | false | false | 3,054 | py | ''' Assemble websocket wire message fragments into complete Bokeh Server
message objects that can be processed.
'''
from __future__ import absolute_import
import six
from tornado.concurrent import return_future
from ..exceptions import ValidationError
import logging
log = logging.getLogger(__name__)
class Receiver(object):
    '''Assemble websocket fragments into a complete protocol message.

    State machine: HEADER -> METADATA -> CONTENT, then (when the partial
    message declares buffers) BUFFER_HEADER -> BUFFER_PAYLOAD pairs until
    the message is complete, at which point the state resets to HEADER.

    On MessageError or ValidationError, the receiver will reset its state
    and attempt to consume a new message.
    NOTE: the *fragment* received can be either bytes or unicode, depending
    on the transport's semantics (WebSocket allows both).
    [
        # these are required
        b'{header}',     # serialized header dict
        b'{metadata}',   # serialized metadata dict
        b'{content}',    # serialized content dict
        # these are optional, and come in pairs; header contains num_buffers
        b'{buf_header}', # serialized buffer header dict
        b'array'         # raw buffer payload data
        ...
    ]
    '''
    def __init__(self, protocol):
        self._protocol = protocol
        # _current_consumer is the state: a bound method handling the next fragment.
        self._current_consumer = self._HEADER
        self._message = None
        self._buf_header = None
    @return_future
    def consume(self, fragment, callback=None):
        '''Feed one fragment to the current state handler; the returned
        future resolves with the completed message, or None when more
        fragments are still required.
        '''
        self._current_consumer(fragment)
        callback(self._message)
    def _HEADER(self, fragment):
        # Start of a new message: reset all partial state.
        self._assume_text(fragment)
        self._message = None
        self._partial = None
        self._fragments = [fragment]
        self._current_consumer = self._METADATA
    def _METADATA(self, fragment):
        self._assume_text(fragment)
        self._fragments.append(fragment)
        self._current_consumer = self._CONTENT
    def _CONTENT(self, fragment):
        # Third fragment: we now have header/metadata/content and can build
        # the (possibly still buffer-incomplete) message object.
        self._assume_text(fragment)
        self._fragments.append(fragment)
        header_json, metadata_json, content_json = self._fragments[:3]
        self._partial = self._protocol.assemble(header_json, metadata_json, content_json)
        self._check_complete()
    def _BUFFER_HEADER(self, fragment):
        # Buffer headers are text; the payload that follows is binary.
        self._assume_text(fragment)
        self._buf_header = fragment
        self._current_consumer = self._BUFFER_PAYLOAD
    def _BUFFER_PAYLOAD(self, fragment):
        self._assume_binary(fragment)
        self._partial.assemble_buffer(self._buf_header, fragment)
        self._check_complete()
    def _check_complete(self):
        # Either publish the finished message and reset, or wait for the
        # next buffer header/payload pair.
        if self._partial.complete:
            self._message = self._partial
            self._current_consumer = self._HEADER
        else:
            self._current_consumer = self._BUFFER_HEADER
    def _assume_text(self, fragment):
        if not isinstance(fragment, six.text_type):
            raise ValidationError("expected text fragment but received binary fragment for %s" % (self._current_consumer.__name__))
    def _assume_binary(self, fragment):
        if not isinstance(fragment, six.binary_type):
            raise ValidationError("expected binary fragment but received text fragment for %s" % (self._current_consumer.__name__))
| [
"wgyumg@mgail.com"
] | wgyumg@mgail.com |
093fc662077fab8d376ec677f4b1a61b8270631e | 25ebc03b92df764ff0a6c70c14c2848a49fe1b0b | /daily/20191012/example_metashape/17walker.py | 3c668d9b7ae518dec2b9f57007d8896ff261d94c | [] | no_license | podhmo/individual-sandbox | 18db414fafd061568d0d5e993b8f8069867dfcfb | cafee43b4cf51a321f4e2c3f9949ac53eece4b15 | refs/heads/master | 2023-07-23T07:06:57.944539 | 2023-07-09T11:45:53 | 2023-07-09T11:45:53 | 61,940,197 | 6 | 0 | null | 2022-10-19T05:01:17 | 2016-06-25T11:27:04 | Python | UTF-8 | Python | false | false | 1,612 | py | import typing as t
T = t.TypeVar("T")
F = t.TypeVar("F", bound=t.Callable[..., t.Any])
MetaData = t.Optional[t.Dict[str, t.Any]]
class Field(t.Generic[F]):
    """Descriptor that exposes a callable as a computed attribute.

    Reading the attribute invokes ``wrapped(obj)``.  ``metadata`` records
    the wrapped callable's docstring under the "doc" key.
    """
    wrapped: F
    def __init__(self, wrapped: F):
        self.wrapped = wrapped
        try:
            self.__doc__ = wrapped.__doc__
        except AttributeError:  # narrowed from a bare `except:` that also
            pass                # swallowed KeyboardInterrupt/SystemExit
        # Captured after __doc__ is (possibly) copied from the callable.
        self.metadata = {"doc": getattr(self, "__doc__", None)}
    def __get__(self, obj, type=None) -> T:
        # Non-data descriptor: called for class access (obj is None) as well
        # as instance access; obj is forwarded to the wrapped callable as-is.
        return self.wrapped(obj)
def get_metadata(cls: t.Type[t.Any], name: str) -> t.Optional[MetaData]:
    """Return the metadata dict of the descriptor stored as *name* on *cls*.

    Only cls.__dict__ is consulted (no MRO walk); returns None when the
    class itself does not define the attribute.
    """
    descriptor = cls.__dict__.get(name)
    return None if descriptor is None else descriptor.metadata
def walk(
    typ: t.Type[t.Any]
) -> t.Iterable[t.Tuple[str, t.Type[t.Any], t.Optional[MetaData]]]:
    """Yield (field name, resolved type, metadata) for each annotated field."""
    hints = t.get_type_hints(typ)
    for field_name in hints:
        yield field_name, hints[field_name], get_metadata(typ, field_name)
class Person:
    # Plain annotated fields; walk() discovers them via typing.get_type_hints.
    name: str
    age: int = 0
def field(fn: F) -> Field[F]:
    """Decorator shorthand: wrap a method in a Field descriptor."""
    return Field(fn)
class WPerson(Person):
    # Overrides Person.name with a computed Field and adds a nickname Field.
    # The docstrings below are observable at runtime via Field.metadata.
    @field
    def name(self) -> str:
        """name docstring"""
        return "<name>"
    @field
    def nickname(self) -> t.Optional[str]:
        """nickname docstring"""
        return None
# Demo: class-level access also goes through Field.__get__ (obj is None).
print(WPerson.nickname, WPerson.age)
print(get_metadata(WPerson, "nickname"))
print("----------------------------------------")
for x in walk(WPerson):
    print(x)
if t.TYPE_CHECKING:
    # Static-checker-only block: reveal_type is never executed at runtime.
    reveal_type(WPerson.nickname)
    reveal_type(WPerson().nickname)
print("========================================")
wp = WPerson()
print(wp.name, wp.nickname)
# Field has no __set__, so this instance attribute shadows the descriptor.
wp.nickname = "foo"
print(wp.name, wp.nickname)
| [
"ababjam61+github@gmail.com"
] | ababjam61+github@gmail.com |
c0430d2eeb1e8011132f75363c50f7f85c37b417 | 8524e35d5848e7c6dcc774d35818c12bbc01bf67 | /taocode2/apps/user/auth.py | bbc40f94e10c64987528b578c36450d890be0cfb | [] | no_license | imbugs/taobaocode | 80f49e1829807b4751b2a6e6949c850843b996a7 | a8fbd7fc2f0d3f88eaeda7d944bcc688dee47b18 | refs/heads/master | 2021-01-02T23:06:29.518047 | 2013-05-16T03:34:47 | 2013-05-16T03:34:47 | 10,093,033 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,390 | py | #
# Copyright (C) 2011 Taobao .Inc
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://code.taobao.org/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://code.taobao.org/.
from taocode2.models import User, secpwd
from taocode2.helper import consts
from django.db.models import Q
__author__ = 'luqi@taobao.com'
class UserAuthBackend:
    """Django auth backend: accepts either the account name or the email
    address (both case-insensitive) as the username."""
    def authenticate(self, username=None, password=None):
        # Look up an enabled user whose name OR email matches.
        try:
            user = User.objects.get(Q(name__iexact=username) | Q(email__iexact=username),
                                    status = consts.USER_ENABLE)
        except User.DoesNotExist:
            return None
        # secpwd(password) is compared to the stored value (presumably the
        # stored password hash — defined in taocode2.models; confirm there).
        if secpwd(password) != user.password:
            return None
        return user
    def get_user(self, user_id):
        # Required backend hook: resolve a session's user id to a User.
        try:
            user = User.objects.get(pk=user_id)
            return user
        except User.DoesNotExist:
            return None
    def has_perm(self, user, perm):
        # This backend grants no object/model permissions.
        return False
    def supports_object_permissions(self):
        return False
    def supports_anonymous_user(self):
        return False
| [
"imbugs@126.com"
] | imbugs@126.com |
f6b9b2ad4df858c10c4208fc0b3d6b28d0608d5f | d87f6d9e769709def3efcf30230cd8bf6ac2cef7 | /WWTest/util/sendEmailWithLink.py | 7bc4933f1049b445a170df1cade57d1a844b7237 | [] | no_license | wawj901124/centos8xitong | 876dcc45b895871119404ad1899ca59ab5dd90b6 | 81fc0d1151e3172ceec2093b035d2cd921e1a433 | refs/heads/master | 2023-02-23T22:33:22.314433 | 2021-01-31T01:54:35 | 2021-01-31T01:54:35 | 290,476,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,028 | py | import smtplib #导入smtplib,用来发送邮件和连接服务器
from email.mime.text import MIMEText #导入MIMEText,用来组织邮件格式内容
from email.mime.multipart import MIMEMultipart #添加附件用
from email.mime.application import MIMEApplication #添加附件用
class SendEmail:
    """Build and send automated-test report emails over SMTP.

    The run summary goes in the plain-text body, an optional report URL is
    rendered as an HTML part, and the report file itself is attached.
    """
    # NOTE: these assignments deliberately create *module-level* globals
    # (send_user / email_host / password) that the methods below read.
    # SECURITY: credentials are hard-coded in source; move them to a config
    # file or environment variables.
    globals()['send_user'] = "xiang_kaizheng@wanweitech.com"  # sender address
    globals()['email_host'] = "smtp.263.net"                  # SMTP server host
    globals()['password'] = "wanwei889"                       # SMTP login password
    def send_mail(self, user_list, sub, content, filenamepath, reporturl=None):
        """Send one email.

        user_list: recipient addresses. sub: subject line. content: plain
        text body. filenamepath: file to attach. reporturl: optional report
        URL; when given, an HTML part linking to it is attached first.
        """
        user = "Mushishi" + "<" + send_user + ">"  # sender with display name
        message = MIMEMultipart()  # multipart: body part(s) + attachment
        if reporturl is not None:  # only embed the link when a URL is supplied
            html_msg = \
                """
            <!DOCTYPE html>
            <html lang="en">
            <head>
                <meta charset="UTF-8">
                <title>测试报告</title>
            </head>
            <body>
            <h1>报告网址:<a href="%s">%s</a></h2>
            </html>
            """ % (reporturl, reporturl)
            # The HTML part is attached before the plain part (original
            # comment: otherwise the plain part may not be displayed).
            content_html = MIMEText(html_msg, "html", "utf-8")
            message.attach(content_html)
        message.attach(MIMEText(content, _subtype='plain', _charset='utf-8'))
        message['Subject'] = sub
        message['From'] = user
        message['To'] = ";".join(user_list)  # recipients joined by semicolons
        # Attachment; use `with` so the file handle is closed (it was leaked).
        with open(filenamepath, 'rb') as attachment_file:
            htmlpart = MIMEApplication(attachment_file.read())
        htmlpart.add_header('Content-Disposition', 'attachment', filename=filenamepath)
        message.attach(htmlpart)
        server = smtplib.SMTP()
        try:
            server.connect(email_host)
            server.login(send_user, password)
            # as_string() serializes the whole MIME tree for transmission.
            server.sendmail(user, user_list, message.as_string())
        finally:
            server.close()  # close the connection even when sending fails
    def run_send(self, pass_count, fail_count, error_count, filenamepath,
                 userlist=None, emailtitle=None):
        """Summarize the run and send the report without a report link.

        Deduplicated: identical to run_send_with_report with reporturl=None.
        """
        self.run_send_with_report(pass_count, fail_count, error_count,
                                  filenamepath, userlist=userlist,
                                  emailtitle=emailtitle, reporturl=None)
    def run_send_with_report(self, pass_count, fail_count, error_count,
                             filenamepath, userlist=None, emailtitle=None,
                             reporturl=None):
        """Compute pass/fail/error percentages and send the report email.

        NOTE: raises ZeroDivisionError when all three counts are zero,
        matching the historical behavior.
        """
        pass_num = float(pass_count)
        fail_num = float(fail_count)
        error_num = float(error_count)
        count_num = pass_num + fail_num + error_num
        # "%.2f%%" -> two decimal places followed by a literal percent sign.
        pass_result = "%.2f%%" % (pass_num / count_num * 100)
        fail_result = "%.2f%%" % (fail_num / count_num * 100)
        error_result = "%.2f%%" % (error_num / count_num * 100)
        user_list = ['xiang_kaizheng@wanweitech.com'] if userlist is None else userlist
        sub = "自动化测试报告" if emailtitle is None else emailtitle
        content = "此次一共执行用例个数为%s个,成功个数为%s个,失败个数为%s个,错误个数为%s个,通过率为%s,失败率为的%s,错误率为%s." % (count_num, pass_num, fail_num, error_num, pass_result, fail_result, error_result)
        self.send_mail(user_list, sub, content, filenamepath, reporturl=reporturl)
if __name__ == '__main__':
    sen = SendEmail() # instantiate the sender
    # sen.send_main([2,3,4],[5,6,7],'../report/01_report.html')
    user_list = ['xiang_kaizheng@wanweitech.com']
    emailtitle = "商户后台-登录_自动化测试报告"
    # sen.run_send(2,0,0,'1.txt',user_list,emailtitle,)
    # Send once without and once with a report link.
    sen.run_send(2,0,0,'1.txt',user_list,emailtitle)
    sen.run_send_with_report(2,0,0,'1.txt',user_list,emailtitle,reporturl="123")
print("邮件已发送") | [
"wawj900805"
] | wawj900805 |
b7c60f81e448d17ba6e9307cc65b5f717b19cba0 | 7d1e66fec4675572d75d30c632406242973d84aa | /pysc2/bin/mem_leak_check.py | 27efe04d1d88ed96d5c2e341358cf597adea1a63 | [
"Apache-2.0"
] | permissive | monarchBacilluscoli/pysc2 | 91cdd0c85598f64f4c1c8b36126968bc04ac84a4 | e5df7d41205fdb2e205dac2777305f3f6a404e05 | refs/heads/master | 2020-03-22T17:35:57.294868 | 2018-07-09T14:18:38 | 2018-07-09T14:20:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,909 | py | #!/usr/bin/python
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for memory leaks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from absl import app
from future.builtins import range # pylint: disable=redefined-builtin
import psutil
from pysc2 import maps
from pysc2 import run_configs
from s2clientprotocol import common_pb2 as sc_common
from s2clientprotocol import sc2api_pb2 as sc_pb
def main(unused_argv):
  """Run repeated SC2 games against the SC2 process while sampling its CPU
  and memory use; abort when resident memory exceeds 2 GB."""
  interface = sc_pb.InterfaceOptions()
  interface.raw = True
  interface.score = True
  interface.feature_layer.width = 24
  interface.feature_layer.resolution.x = 84
  interface.feature_layer.resolution.y = 84
  interface.feature_layer.minimap_resolution.x = 64
  interface.feature_layer.minimap_resolution.y = 64

  # (elapsed seconds, user CPU seconds, RSS in MB, label) samples.
  timeline = []
  start = time.time()

  run_config = run_configs.get()
  proc = run_config.start()
  process = psutil.Process(proc.pid)

  def add(s):
    # Sample the SC2 process and record it; fail hard past the 2 GB limit.
    cpu = process.cpu_times().user
    mem = process.memory_info().rss / 2 ** 20  # In Mb
    timeline.append((time.time() - start, cpu, mem, s))
    if mem > 2000:
      raise Exception("2gb mem limit exceeded")

  try:
    add("Started")
    controller = proc.controller
    map_inst = maps.get("Simple64")
    create = sc_pb.RequestCreateGame(
        realtime=False, disable_fog=False,
        local_map=sc_pb.LocalMap(map_path=map_inst.path,
                                 map_data=map_inst.data(run_config)))
    create.player_setup.add(type=sc_pb.Participant)
    create.player_setup.add(type=sc_pb.Computer, race=sc_common.Random,
                            difficulty=sc_pb.CheatInsane)
    join = sc_pb.RequestJoinGame(race=sc_common.Random, options=interface)
    controller.create_game(create)
    add("Created")
    controller.join_game(join)
    add("Joined")
    # 30 restarts of up to 2000 16-frame steps each, sampling every 100 steps.
    for _ in range(30):
      for i in range(2000):
        controller.step(16)
        obs = controller.observe()
        if obs.player_result:
          add("Lost")
          break
        if i % 100 == 0:
          add(i)
      controller.restart()
      add("Restarted")
    add("Done")
  except KeyboardInterrupt:
    pass
  finally:
    # Always shut the SC2 process down, even after the memory-limit raise.
    proc.close()

  print("Timeline:")
  for t in timeline:
    print("[%7.3f] cpu: %5.1f s, mem: %4d M; %s" % t)

if __name__ == "__main__":
  app.run(main)
| [
"tewalds@google.com"
] | tewalds@google.com |
05e26f13c90bcb7032e3df3e79b731b10641e170 | ee364e80138d6a2435ff069f3665b4ce36915e40 | /samples/set_pstn_black_list_item.py | 71dbaecc96a3afc048986596279305350ea06b56 | [
"MIT"
] | permissive | antoniotaranto/apiclient-python | 355b21efa7f526cc1f4edec2d45e68ec87b3e327 | 64a727ebecac27ce162f3f198edeb065ab8a6ca0 | refs/heads/master | 2022-02-18T05:14:01.075669 | 2019-09-02T09:58:57 | 2019-09-02T09:58:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | from voximplant.apiclient import VoximplantAPI, VoximplantException
if __name__ == "__main__":
voxapi = VoximplantAPI("credentials.json")
PSTN_BLACKLIST_ID = 1
PSTN_BLACKLIST_PHONE = "123456789"
try:
res = voxapi.set_pstn_black_list_item(PSTN_BLACKLIST_ID, PSTN_BLACKLIST_PHONE)
except VoximplantException as e:
print("Error: {}".format(e.message))
print(res)
| [
"andrey@voximplant.com"
] | andrey@voximplant.com |
b70144b62bd5c217a8e2b1f03e36a1c6efffae61 | a1b21aa9b4c3b99b9b16fd47686bcc76e6fafd18 | /file_and_exceptions/json_practice/favorite_number/favorite_number_writer.py | 3fc63f2e0de4fe8ce4be781f231ee1e33662a89b | [] | no_license | irfan87/python_tutorial | 986c5dae98a5ad928c3820bf0355f544c091caf0 | 71bbf8b8aba2d5a1fafc56b8cb15d471c428a0cf | refs/heads/master | 2020-06-05T00:52:07.619489 | 2019-08-19T02:56:41 | 2019-08-19T02:56:41 | 192,257,432 | 0 | 0 | null | 2019-08-19T02:56:42 | 2019-06-17T01:53:46 | Python | UTF-8 | Python | false | false | 562 | py | import os
import json

# Path is resolved relative to the process working directory, not this file.
file_name = os.path.abspath('file_and_exceptions/json_practice/favorite_number/fav_number.json')

try:
    # Load the previously stored favorite number, if any (EAFP).
    with open(file_name) as json_file:
        json_content = json.load(json_file)
except FileNotFoundError:
    # First run: ask for the number and persist it as JSON.
    user_prompt = input("Please enter your favorite number: ")
    with open(file_name, 'w') as json_file:
        json.dump(user_prompt, json_file)
    # BUG FIX: user-facing message read "To view the your favorite number".
    print("To view your favorite number, please run this app again.")
else:
    print("Your favorite number is:", json_content)
"nerve2009@yahoo.com"
] | nerve2009@yahoo.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.