Dataset schema:

| Column | Dtype | Values |
|---|---|---|
| repo_name | string | lengths 5–92 |
| path | string | lengths 4–221 |
| copies | string | 19 classes |
| size | string | lengths 4–6 |
| content | string | lengths 766–896k |
| license | string | 15 classes |
| hash | int64 | -9,223,277,421,539,062,000 to 9,223,102,107B |
| line_mean | float64 | 6.51–99.9 |
| line_max | int64 | 32–997 |
| alpha_frac | float64 | 0.25–0.96 |
| autogenerated | bool | 1 class |
| ratio | float64 | 1.5–13.6 |
| config_test | bool | 2 classes |
| has_no_keywords | bool | 2 classes |
| few_assignments | bool | 1 class |
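The sample rows below are easier to work with programmatically than visually. As a minimal sketch, assuming this dump comes from a Hugging Face-style dataset (the identifier `user/python-files` is a hypothetical placeholder, not the real name), the schema above maps onto the `datasets` library like this:

```python
# Minimal sketch: load a dataset with the schema above via the `datasets` library.
# Assumption: "user/python-files" is a placeholder identifier, not the real dataset.
from datasets import load_dataset

ds = load_dataset("user/python-files", split="train")

# Each row pairs one source file ("content") with its provenance and
# quality metadata (license, hash, line stats, autogenerated flag, ...).
for row in ds.select(range(3)):
    print(row["repo_name"], row["path"], row["license"], row["size"])
    print(row["content"][:200])  # first 200 characters of the file
```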
repo_name: pansapiens/mytardis
path: tardis/apps/mx_views/views.py
copies: 3
size: 2892
content:

```python
from django.conf import settings
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.http import HttpResponse

from tardis.tardis_portal.auth import decorators as authz
from tardis.tardis_portal.models import Dataset
from tardis.tardis_portal.shortcuts import get_experiment_referer
from tardis.tardis_portal.shortcuts import render_response_index


@authz.dataset_access_required
def view_full_dataset(request, dataset_id):
    """Displays a MX Dataset and associated information.

    Shows a full (hundreds of images) dataset, its metadata and a list
    of associated files with the option to show metadata of each file
    and ways to download those files.  With write permission this page
    also allows uploading and metadata editing.

    Settings for this view:
    INSTALLED_APPS += ("tardis.apps.mx_views",)
    DATASET_VIEWS = [("http://synchrotron.org.au/views/dataset/full",
                      "tardis.apps.mx_views.views.view_full_dataset"),]
    """
    dataset = Dataset.objects.get(id=dataset_id)

    def get_datafiles_page():
        # pagination was removed by someone in the interface but not here.
        # need to fix.
        pgresults = 100

        paginator = Paginator(dataset.datafile_set.all(), pgresults)

        try:
            page = int(request.GET.get('page', '1'))
        except ValueError:
            page = 1

        # If page request (9999) is out of range, deliver last page of results.
        try:
            return paginator.page(page)
        except (EmptyPage, InvalidPage):
            return paginator.page(paginator.num_pages)

    display_images = dataset.get_images()
    image_count = len(display_images)
    if image_count > 4:
        # take 4 evenly spaced images from the set
        display_images = display_images[0::image_count / 4][:4]

    upload_method = getattr(settings, "UPLOAD_METHOD", "uploadify")

    c = {
        'dataset': dataset,
        'datafiles': get_datafiles_page(),
        'parametersets': dataset.getParameterSets()
                                .exclude(schema__hidden=True),
        'has_download_permissions':
            authz.has_dataset_download_access(request, dataset_id),
        'has_write_permissions':
            authz.has_dataset_write(request, dataset_id),
        'from_experiment':
            get_experiment_referer(request, dataset_id),
        'other_experiments':
            authz.get_accessible_experiments_for_dataset(request, dataset_id),
        'display_images': display_images,
        'upload_method': upload_method,
        'default_organization':
            getattr(settings, 'DEFAULT_ARCHIVE_ORGANIZATION', 'classic'),
        'default_format':
            getattr(settings, 'DEFAULT_ARCHIVE_FORMATS', ['tgz', 'tar'])[0]
    }
    return HttpResponse(render_response_index(
        request, 'mx_views/view_full_dataset.html', c))
```
license: bsd-3-clause
hash: -8,726,488,663,588,781,000
line_mean: 37.052632
line_max: 79
alpha_frac: 0.65491
autogenerated: false
ratio: 4.011096
config_test: false
has_no_keywords: false
few_assignments: false

repo_name: jvs/sourcer
path: tests/test_salesforce.py
copies: 1
size: 5119
content:

````python
from math import log10, sqrt  # used by the LOG and SQRT evaluators below
from sourcer import Grammar

# This is work in progress.
# See: https://help.salesforce.com/articleView?id=customize_functions.htm&type=5

g = Grammar(r'''
    ```
    import ast
    ```

    start = Expression

    Expression = OperatorPrecedence(
        Atom | "(" >> Expression << ")",
        Postfix(ArgumentList | FieldAccess),
        Prefix("-" | "+" | "!"),
        RightAssoc("^"),
        LeftAssoc("*" | "/"),
        LeftAssoc("+" | "-" | "&"),
        NonAssoc("<=" | "<" | ">=" | ">"),
        NonAssoc("!=" | "<>" | "==" | "="),
        LeftAssoc("&&"),
        LeftAssoc("||"),
    )

    class ArgumentList {
        arguments: "(" >> (Expression /? ",") << ")"
    }

    class FieldAccess {
        field: "." >> Word
    }

    Atom = Global | Identifier | Rational | Integer | String

    class Global {
        name: "$" >> Word
    }

    class Identifier {
        name: Word
    }

    # ASK: What is the real syntax for these things?
    Word = /[_a-zA-Z][_a-zA-Z0-9]*/

    Rational = /(\d+\.\d*)|(\d*\.\d+)/ |> `float`

    Integer = /\d+/ |> `int`

    StringLiteral = /("([^"\\]|\\.)*")/ | /('([^'\\]|\\.)*')/

    # For now, just use ast module to evaluate string literals.
    class String {
        value: StringLiteral |> `ast.literal_eval`
    }

    ignore /\s+/
''', include_source=True)


aliases = {
    '=': '==',
    '<>': '!=',
}

constants = {
    'NULL': None,
    'TRUE': True,
    'FALSE': False,
}

# Incomplete collection of evaluators.
evaluators = {
    '*': lambda x, y: x * y if x is not None and y is not None else None,
    '/': lambda x, y: x / y if x is not None and y is not None else None,
    '+': lambda x, y: x + y if x is not None and y is not None else None,
    '-': lambda x, y: x - y if x is not None and y is not None else None,
    '==': lambda x, y: x == y,
    '!=': lambda x, y: x != y,
    '&&': lambda x, y: x and y,
    '||': lambda x, y: x or y,
    '>': lambda x, y: x > y if x is not None and y is not None else False,
    '<': lambda x, y: x < y if x is not None and y is not None else False,
    '>=': lambda x, y: x >= y if x is not None and y is not None else False,
    '<=': lambda x, y: x <= y if x is not None and y is not None else False,
    'AND': lambda *a: all(a),
    'CONTAINS': lambda x, y: str(y) in str(x) if x is not None else True,
    'IF': lambda x, y, z: y if x else z,
    'ISBLANK': lambda x: x is None,
    'LOG': lambda x: log10(x) if x is not None else None,
    'MAX': lambda *a: max(*a),
    'MIN': lambda *a: min(*a),
    'MOD': lambda x, y: (x % y) if x is not None and y is not None else None,
    'NOT': lambda x: not(x),
    'OR': lambda *a: any(a),
    'SQRT': lambda x: sqrt(x) if x is not None else None,
    'TEXT': lambda x: str(x),
}


def evaluate(node, bindings):
    # Look up identifiers.
    if isinstance(node, g.Identifier):
        if node.name in bindings:
            return bindings[node.name]
        name = node.name.upper()
        return bindings.get(name, name)

    # Look up fields.
    if isinstance(node, g.Postfix) and isinstance(node.operator, g.FieldAccess):
        obj, field = node.left, node.operator.field
        if hasattr(obj, field):
            return getattr(obj, field)
        elif isinstance(obj, dict):
            return obj.get(field)
        else:
            return node

    # Evaluate function calls and operators.
    if isinstance(node, g.Infix):
        x, func, y = node.left, node.operator, node.right
        args = (x, y)
    elif isinstance(node, g.Postfix) and isinstance(node.operator, g.ArgumentList):
        func, args = node.left, node.operator.arguments
    else:
        return node

    # Check if we're using an alias.
    func = aliases.get(func, func)

    if func in evaluators:
        return evaluators[func](*args)
    else:
        return node


def run(formula, bindings=None):
    updated_bindings = dict(constants)
    updated_bindings.update(bindings or {})
    tree = g.parse(formula)
    return g.transform(tree, lambda node: evaluate(node, updated_bindings))


def test_some_simple_formulas():
    result = run('1 + 2 * 3')
    assert result == 7

    result = run('foo == bar && fiz == buz', bindings={
        'foo': 1, 'bar': 1, 'fiz': 2, 'buz': 2,
    })
    assert result == True

    result = run('foo == bar && fiz == buz', bindings={
        'foo': 1, 'bar': 1, 'fiz': 2, 'buz': 3,
    })
    assert result == False

    result = run('1 <= 2 && (false || true)')
    assert result == True  # Explicitly compare to True.

    result = run('1 > 2 || (true && false)')
    assert result == False  # Explicitly compare to False.

    result = run('foo != bar', bindings={'foo': 10, 'bar': 10})
    assert not result

    result = run('foo != bar', bindings={'foo': 1, 'bar': 2})
    assert result

    result = run('foo.bar', bindings={'foo': {'bar': 10}})
    assert result == 10

    result = run('foo.bar.baz', bindings={'foo': {'bar': {'baz': 100}}})
    assert result == 100

    result = run('MIN(20, 10, 30)')
    assert result == 10

    result = run('MIN(20, 10, 30) + MAX(11, 12, 13)')
    assert result == 23
````
license: mit
hash: -7,452,110,781,552,088,000
line_mean: 27.126374
line_max: 83
alpha_frac: 0.540535
autogenerated: false
ratio: 3.374423
config_test: false
has_no_keywords: false
few_assignments: false

repo_name: lsaffre/timtools
path: timtools/sdoc/feeders.py
copies: 1
size: 1705
content:

```python
## Copyright 2003-2009 Luc Saffre
## This file is part of the TimTools project.
## TimTools is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
## TimTools is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with TimTools; if not, see <http://www.gnu.org/licenses/>.

import re


def plain2xml(txt):
    txt = txt.replace("&", "&amp;")
    txt = txt.replace("<", "&lt;")
    return txt


memocommands = (
    (re.compile('\[url\s+(\S+)\s*(.*?)\]', re.DOTALL),
     lambda m: '<b>' + m.group(2) + '</b> (<i>' + m.group(1) + '</i>)'),
)

# urlfind =
# urlrepl = re.compile('<b>\2</b> (<u>\1</u>)')

# def urlrepl(m):


def memo2xml(txt):
    txt = plain2xml(txt)
    txt = txt.replace('[B]', '<b>')
    txt = txt.replace('[b]', '</b>')
    txt = txt.replace('[U]', '<u>')
    txt = txt.replace('[u]', '</u>')
    for find, repl in memocommands:
        txt = re.sub(find, repl, txt)
    return txt


def rst2xml(txt):
    raise NotImplementedError("doesn't work")
    import docutils.parsers.rst
    import docutils.utils
    parser = docutils.parsers.rst.Parser()
    doc = docutils.utils.new_document("feed")
    parser.parse(txt, doc)
    raise NotImplementedError("and now?")


_feeders = {
    'xml': lambda x: x,
    'plain': plain2xml,
    'rst': rst2xml,
    'memo': memo2xml,
}


def getFeeder(name):
    return _feeders[name]
```
license: bsd-2-clause
hash: 5,341,080,861,915,417,000
line_mean: 27.416667
line_max: 71
alpha_frac: 0.63871
autogenerated: false
ratio: 3.204887
config_test: false
has_no_keywords: false
few_assignments: false

repo_name: souravbadami/zulip
path: zerver/views/home.py
copies: 1
size: 17065
content:

```python
from __future__ import absolute_import
from typing import Any, List, Dict, Optional, Text

from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse, HttpRequest
from django.shortcuts import redirect
from django.utils import translation
from django.utils.cache import patch_cache_control
from six.moves import zip_longest, zip, range

from zerver.decorator import zulip_login_required, process_client
from zerver.forms import ToSForm
from zerver.lib.realm_icon import realm_icon_url
from zerver.models import Message, UserProfile, Stream, Subscription, Huddle, \
    Recipient, Realm, UserMessage, DefaultStream, RealmEmoji, RealmAlias, \
    RealmFilter, PreregistrationUser, UserActivity, \
    UserPresence, get_recipient, name_changes_disabled, email_to_username, \
    list_of_domains_for_realm
from zerver.lib.events import do_events_register
from zerver.lib.actions import update_user_presence, do_change_tos_version, \
    do_update_pointer, get_cross_realm_dicts, realm_user_count
from zerver.lib.avatar import avatar_url
from zerver.lib.i18n import get_language_list, get_language_name, \
    get_language_list_for_templates
from zerver.lib.push_notifications import num_push_devices_for_user
from zerver.lib.streams import access_stream_by_name
from zerver.lib.utils import statsd, get_subdomain
from zproject.backends import password_auth_enabled
from zproject.jinja2 import render_to_response

import calendar
import datetime
import logging
import os
import re
import simplejson
import time


@zulip_login_required
def accounts_accept_terms(request):
    # type: (HttpRequest) -> HttpResponse
    if request.method == "POST":
        form = ToSForm(request.POST)
        if form.is_valid():
            do_change_tos_version(request.user, settings.TOS_VERSION)
            return redirect(home)
    else:
        form = ToSForm()

    email = request.user.email
    special_message_template = None
    if request.user.tos_version is None and settings.FIRST_TIME_TOS_TEMPLATE is not None:
        special_message_template = 'zerver/' + settings.FIRST_TIME_TOS_TEMPLATE
    return render_to_response(
        'zerver/accounts_accept_terms.html',
        {'form': form,
         'email': email,
         'special_message_template': special_message_template},
        request=request)


def approximate_unread_count(user_profile):
    # type: (UserProfile) -> int
    not_in_home_view_recipients = [sub.recipient.id for sub in
                                   Subscription.objects.filter(
                                       user_profile=user_profile, in_home_view=False)]

    # TODO: We may want to exclude muted messages from this count.
    #       It was attempted in the past, but the original attempt
    #       was broken.  When we re-architect muting, we may
    #       want to to revisit this (see git issue #1019).
    return UserMessage.objects.filter(
        user_profile=user_profile, message_id__gt=user_profile.pointer).exclude(
        message__recipient__type=Recipient.STREAM,
        message__recipient__id__in=not_in_home_view_recipients).exclude(
        flags=UserMessage.flags.read).count()


def sent_time_in_epoch_seconds(user_message):
    # type: (UserMessage) -> Optional[float]
    # user_message is a UserMessage object.
    if not user_message:
        return None
    # We have USE_TZ = True, so our datetime objects are timezone-aware.
    # Return the epoch seconds in UTC.
    return calendar.timegm(user_message.message.pub_date.utctimetuple())


def home(request):
    # type: (HttpRequest) -> HttpResponse
    if settings.DEVELOPMENT and os.path.exists('var/handlebars-templates/compile.error'):
        response = render_to_response('zerver/handlebars_compilation_failed.html',
                                      request=request)
        response.status_code = 500
        return response
    if not settings.SUBDOMAINS_HOMEPAGE:
        return home_real(request)

    # If settings.SUBDOMAINS_HOMEPAGE, sends the user the landing
    # page, not the login form, on the root domain

    subdomain = get_subdomain(request)
    if subdomain != "":
        return home_real(request)

    return render_to_response('zerver/hello.html',
                              request=request)


@zulip_login_required
def home_real(request):
    # type: (HttpRequest) -> HttpResponse
    # We need to modify the session object every two weeks or it will expire.
    # This line makes reloading the page a sufficient action to keep the
    # session alive.
    request.session.modified = True

    user_profile = request.user

    # If a user hasn't signed the current Terms of Service, send them there
    if settings.TERMS_OF_SERVICE is not None and settings.TOS_VERSION is not None and \
            int(settings.TOS_VERSION.split('.')[0]) > user_profile.major_tos_version():
        return accounts_accept_terms(request)

    narrow = []  # type: List[List[Text]]
    narrow_stream = None
    narrow_topic = request.GET.get("topic")
    if request.GET.get("stream"):
        try:
            narrow_stream_name = request.GET.get("stream")
            (narrow_stream, ignored_rec, ignored_sub) = access_stream_by_name(
                user_profile, narrow_stream_name)
            narrow = [["stream", narrow_stream.name]]
        except Exception:
            logging.exception("Narrow parsing")
        if narrow_stream is not None and narrow_topic is not None:
            narrow.append(["topic", narrow_topic])

    register_ret = do_events_register(user_profile, request.client,
                                      apply_markdown=True, narrow=narrow)
    user_has_messages = (register_ret['max_message_id'] != -1)

    # Reset our don't-spam-users-with-email counter since the
    # user has since logged in
    if user_profile.last_reminder is not None:
        user_profile.last_reminder = None
        user_profile.save(update_fields=["last_reminder"])

    # Brand new users get the tutorial
    needs_tutorial = settings.TUTORIAL_ENABLED and \
        user_profile.tutorial_status != UserProfile.TUTORIAL_FINISHED

    first_in_realm = realm_user_count(user_profile.realm) == 1
    # If you are the only person in the realm and you didn't invite
    # anyone, we'll continue to encourage you to do so on the frontend.
    prompt_for_invites = first_in_realm and \
        not PreregistrationUser.objects.filter(referred_by=user_profile).count()

    if user_profile.pointer == -1 and user_has_messages:
        # Put the new user's pointer at the bottom
        #
        # This improves performance, because we limit backfilling of messages
        # before the pointer.  It's also likely that someone joining an
        # organization is interested in recent messages more than the very
        # first messages on the system.
        register_ret['pointer'] = register_ret['max_message_id']
        user_profile.last_pointer_updater = request.session.session_key

    if user_profile.pointer == -1:
        latest_read = None
    else:
        try:
            latest_read = UserMessage.objects.get(user_profile=user_profile,
                                                  message__id=user_profile.pointer)
        except UserMessage.DoesNotExist:
            # Don't completely fail if your saved pointer ID is invalid
            logging.warning("%s has invalid pointer %s" % (user_profile.email, user_profile.pointer))
            latest_read = None

    desktop_notifications_enabled = user_profile.enable_desktop_notifications
    if narrow_stream is not None:
        desktop_notifications_enabled = False

    if user_profile.realm.notifications_stream:
        notifications_stream = user_profile.realm.notifications_stream.name
    else:
        notifications_stream = ""

    # Set default language and make it persist
    default_language = register_ret['default_language']
    url_lang = '/{}'.format(request.LANGUAGE_CODE)
    if not request.path.startswith(url_lang):
        translation.activate(default_language)
    request.session[translation.LANGUAGE_SESSION_KEY] = default_language

    # Pass parameters to the client-side JavaScript code.
    # These end up in a global JavaScript Object named 'page_params'.
    page_params = dict(
        # Server settings.
        share_the_love = settings.SHARE_THE_LOVE,
        development_environment = settings.DEVELOPMENT,
        debug_mode = settings.DEBUG,
        test_suite = settings.TEST_SUITE,
        poll_timeout = settings.POLL_TIMEOUT,
        login_page = settings.HOME_NOT_LOGGED_IN,
        server_uri = settings.SERVER_URI,
        maxfilesize = settings.MAX_FILE_UPLOAD_SIZE,
        max_avatar_file_size = settings.MAX_AVATAR_FILE_SIZE,
        server_generation = settings.SERVER_GENERATION,
        use_websockets = settings.USE_WEBSOCKETS,
        save_stacktraces = settings.SAVE_FRONTEND_STACKTRACES,

        # realm data.
        # TODO: Move all of these data to register_ret and pull from there
        realm_uri = user_profile.realm.uri,
        password_auth_enabled = password_auth_enabled(user_profile.realm),
        domains = list_of_domains_for_realm(user_profile.realm),
        name_changes_disabled = name_changes_disabled(user_profile.realm),
        mandatory_topics = user_profile.realm.mandatory_topics,
        show_digest_email = user_profile.realm.show_digest_email,
        realm_presence_disabled = user_profile.realm.presence_disabled,
        is_zephyr_mirror_realm = user_profile.realm.is_zephyr_mirror_realm,

        # user_profile data.
        # TODO: Move all of these data to register_ret and pull from there
        fullname = user_profile.full_name,
        email = user_profile.email,
        enter_sends = user_profile.enter_sends,
        user_id = user_profile.id,
        is_admin = user_profile.is_realm_admin,
        can_create_streams = user_profile.can_create_streams(),
        autoscroll_forever = user_profile.autoscroll_forever,
        default_desktop_notifications = user_profile.default_desktop_notifications,
        avatar_url = avatar_url(user_profile),
        avatar_url_medium = avatar_url(user_profile, medium=True),
        avatar_source = user_profile.avatar_source,
        timezone = user_profile.timezone,

        # Stream message notification settings:
        stream_desktop_notifications_enabled = user_profile.enable_stream_desktop_notifications,
        stream_sounds_enabled = user_profile.enable_stream_sounds,

        # Private message and @-mention notification settings:
        desktop_notifications_enabled = desktop_notifications_enabled,
        sounds_enabled = user_profile.enable_sounds,
        enable_offline_email_notifications = user_profile.enable_offline_email_notifications,
        pm_content_in_desktop_notifications = user_profile.pm_content_in_desktop_notifications,
        enable_offline_push_notifications = user_profile.enable_offline_push_notifications,
        enable_online_push_notifications = user_profile.enable_online_push_notifications,
        enable_digest_emails = user_profile.enable_digest_emails,

        # Realm foreign key data from register_ret.
        # TODO: Rename these to match register_ret values.
        subbed_info = register_ret['subscriptions'],
        unsubbed_info = register_ret['unsubscribed'],
        neversubbed_info = register_ret['never_subscribed'],
        people_list = register_ret['realm_users'],
        bot_list = register_ret['realm_bots'],
        initial_pointer = register_ret['pointer'],
        initial_presences = register_ret['presences'],
        event_queue_id = register_ret['queue_id'],

        # Misc. extra data.
        have_initial_messages = user_has_messages,
        initial_servertime = time.time(),  # Used for calculating relative presence age
        default_language_name = get_language_name(register_ret['default_language']),
        language_list_dbl_col = get_language_list_for_templates(register_ret['default_language']),
        language_list = get_language_list(),
        needs_tutorial = needs_tutorial,
        first_in_realm = first_in_realm,
        prompt_for_invites = prompt_for_invites,
        notifications_stream = notifications_stream,
        cross_realm_bots = list(get_cross_realm_dicts()),
        unread_count = approximate_unread_count(user_profile),
        furthest_read_time = sent_time_in_epoch_seconds(latest_read),
        has_mobile_devices = num_push_devices_for_user(user_profile) > 0,
    )

    # These fields will be automatically copied from register_ret into
    # page_params.  It is a goal to move more of the page_params list
    # into this sort of cleaner structure.
    page_params_core_fields = [
        'alert_words',
        'attachments',
        'default_language',
        'emoji_alt_code',
        'last_event_id',
        'left_side_userlist',
        'max_icon_file_size',
        'max_message_id',
        'muted_topics',
        'realm_add_emoji_by_admins_only',
        'realm_allow_message_editing',
        'realm_authentication_methods',
        'realm_bot_domain',
        'realm_create_stream_by_admins_only',
        'realm_default_language',
        'realm_default_streams',
        'realm_email_changes_disabled',
        'realm_emoji',
        'realm_filters',
        'realm_icon_source',
        'realm_icon_url',
        'realm_invite_by_admins_only',
        'realm_invite_required',
        'realm_message_content_edit_limit_seconds',
        'realm_name',
        'realm_name_changes_disabled',
        'realm_restricted_to_domain',
        'realm_waiting_period_threshold',
        'referrals',
        'twenty_four_hour_time',
        'zulip_version',
    ]

    for field_name in page_params_core_fields:
        page_params[field_name] = register_ret[field_name]

    if narrow_stream is not None:
        # In narrow_stream context, initial pointer is just latest message
        recipient = get_recipient(Recipient.STREAM, narrow_stream.id)
        try:
            initial_pointer = Message.objects.filter(recipient=recipient).order_by('id').reverse()[0].id
        except IndexError:
            initial_pointer = -1
        page_params["narrow_stream"] = narrow_stream.name
        if narrow_topic is not None:
            page_params["narrow_topic"] = narrow_topic
        page_params["narrow"] = [dict(operator=term[0], operand=term[1]) for term in narrow]
        page_params["max_message_id"] = initial_pointer
        page_params["initial_pointer"] = initial_pointer
        page_params["have_initial_messages"] = (initial_pointer != -1)

    statsd.incr('views.home')
    show_invites = True

    # Some realms only allow admins to invite users
    if user_profile.realm.invite_by_admins_only and not user_profile.is_realm_admin:
        show_invites = False

    request._log_data['extra'] = "[%s]" % (register_ret["queue_id"],)
    response = render_to_response('zerver/index.html',
                                  {'user_profile': user_profile,
                                   'page_params': simplejson.encoder.JSONEncoderForHTML().encode(page_params),
                                   'nofontface': is_buggy_ua(request.META.get("HTTP_USER_AGENT", "Unspecified")),
                                   'avatar_url': avatar_url(user_profile),
                                   'show_debug': settings.DEBUG and ('show_debug' in request.GET),
                                   'pipeline': settings.PIPELINE_ENABLED,
                                   'show_invites': show_invites,
                                   'is_admin': user_profile.is_realm_admin,
                                   'show_webathena': user_profile.realm.webathena_enabled,
                                   'enable_feedback': settings.ENABLE_FEEDBACK,
                                   'embedded': narrow_stream is not None,
                                   },
                                  request=request)
    patch_cache_control(response, no_cache=True, no_store=True, must_revalidate=True)
    return response


@zulip_login_required
def desktop_home(request):
    # type: (HttpRequest) -> HttpResponse
    return HttpResponseRedirect(reverse('zerver.views.home.home'))


def is_buggy_ua(agent):
    # type: (str) -> bool
    """Discriminate CSS served to clients based on User Agent

    Due to QTBUG-3467, @font-face is not supported in QtWebKit.
    This may get fixed in the future, but for right now we can
    just serve the more conservative CSS to all our desktop apps.
    """
    return ("Humbug Desktop/" in agent or "Zulip Desktop/" in agent or "ZulipDesktop/" in agent) and \
        "Mac" not in agent
```
license: apache-2.0
hash: -2,616,524,014,721,912,000
line_mean: 44.75067
line_max: 113
alpha_frac: 0.642309
autogenerated: false
ratio: 4.053444
config_test: true
has_no_keywords: false
few_assignments: false

repo_name: ZeitOnline/zeit.newsletter
path: src/zeit/newsletter/browser/edit.py
copies: 1
size: 2579
content:

```python
from zeit.cms.i18n import MessageFactory as _
from zope.cachedescriptors.property import Lazy as cachedproperty
import os.path
import zeit.cms.browser.view
import zeit.cms.content.interfaces
import zeit.cms.interfaces
import zeit.content.image.interfaces
import zeit.content.video.interfaces
import zeit.edit.browser.form
import zeit.edit.browser.landing
import zeit.edit.browser.view
import zeit.newsletter.interfaces
import zope.component
import zope.formlib.form


class LandingZoneBase(zeit.edit.browser.landing.LandingZone):

    uniqueId = zeit.edit.browser.view.Form('uniqueId')
    block_type = 'teaser'

    def initialize_block(self):
        content = zeit.cms.interfaces.ICMSContent(self.uniqueId)
        self.block.reference = content


class GroupLandingZone(LandingZoneBase):
    """Handler to drop objects to the body's landing zone."""

    order = 0


class TeaserLandingZone(LandingZoneBase):
    """Handler to drop objects after other objects."""

    order = 'after-context'


class Teaser(zeit.cms.browser.view.Base):

    @cachedproperty
    def metadata(self):
        return zeit.cms.content.interfaces.ICommonMetadata(
            self.context.reference, None)

    @cachedproperty
    def image(self):
        # XXX copy&paste&tweak of zeit.content.cp.browser.blocks.teaser.Display
        content = self.context.reference
        if content is None:
            return
        if zeit.content.video.interfaces.IVideoContent.providedBy(content):
            return content.thumbnail
        images = zeit.content.image.interfaces.IImages(content, None)
        if images is None:
            preview = zope.component.queryMultiAdapter(
                (content, self.request), name='preview')
            if preview:
                return self.url(preview)
            return
        if not images.image:
            return
        group = images.image
        for name in group:
            basename, ext = os.path.splitext(name)
            if basename.endswith('148x84'):
                image = group[name]
                return self.url(image, '@@raw')


class Advertisement(zeit.cms.browser.view.Base):

    @cachedproperty
    def image(self):
        if not self.context.image:
            return
        return self.url(self.context.image, '@@raw')


class GroupTitle(zeit.edit.browser.form.InlineForm):

    legend = None
    prefix = 'group'
    undo_description = _('edit group title')
    form_fields = zope.formlib.form.FormFields(
        zeit.newsletter.interfaces.IGroup).select('title')


class Empty(object):

    def render(self):
        return u''
```
license: bsd-3-clause
hash: -2,014,002,219,757,949,200
line_mean: 27.032609
line_max: 79
alpha_frac: 0.67119
autogenerated: false
ratio: 3.781525
config_test: false
has_no_keywords: false
few_assignments: false

repo_name: vadyur/script.media.aggregator
path: anidub.py
copies: 1
size: 12586
content:

```python
# coding: utf-8

import log
from log import debug
from settings import Settings
from base import *
import feedparser, urllib2, re
from bs4 import BeautifulSoup
from nfowriter import *
from strmwriter import *
import requests, filesystem

###################################################################################################
class DescriptionParser(DescriptionParserBase):
    #==============================================================================================
    def get_content(self, url):
        page = urllib2.urlopen(url)
        return page

    #==============================================================================================
    def __init__(self, url):
        Informer.__init__(self)
        self._dict = dict()
        self.content = self.get_content(url)
        #html_doc = '<?xml version="1.0" encoding="UTF-8" ?>\n<html>' + content.encode('utf-8') + '\n</html>'
        self.soup = BeautifulSoup(self.content, 'html.parser')
        self.OK = self.parse()

    #==============================================================================================
    def get_tag(self, x):
        return {
            u'Год: ': u'year',
            u'Жанр: ': u'genre',
            u'Описание: ': u'plot',
            u'Режиссер: ': u'director',
            u'Продолжительность: ': u'runtime',
            u'Страна: ': u'country',
        }.get(x, u'')

    #==============================================================================================
    def clean(self, title):
        try:
            title = title.split(u' ТВ-')[0]
            title = title.split(u' TV-')[0]
            title = title.split(u' [')[0]
        except:
            pass
        return title.strip()

    #==============================================================================================
    def get_title(self, full_title):
        try:
            found = re.search('^(.+?) /', full_title).group(1)
            return self.clean(found)
        except AttributeError:
            return full_title

    #==============================================================================================
    def get_original_title(self, full_title):
        try:
            found = re.search('^.+? / (.+)', full_title).group(1)
            return self.clean(found)
        except AttributeError:
            return full_title

    #==============================================================================================
    def parse_season_from_title(self, title):
        try:
            found = re.search(r"(\d) \[\d+\D+\d+\]", title)
            if found:
                try:
                    self._dict['season'] = int(found.group(1))
                    return
                except:
                    pass

            parts = title.split(u'ТВ-')
            if len(parts) == 1:
                parts = title.split(u'TV-')
            if len(parts) > 1:
                found = re.search('([0-9]+)', parts[1]).group(1)
                self._dict['season'] = int(found)
        except:
            pass

    #==============================================================================================
    def get_episodes_num(self, full_title):
        try:
            found = re.search(' \[([0-9]+) ', full_title).group(1)
            return int(found)
        except AttributeError:
            return 1

    def date_added_duration(self):
        ul = self.soup.find('ul', class_='story_inf')
        if ul:
            for li in ul.find_all('li'):
                txt = li.get_text()
                parts = txt.split(':')
                if len(parts) > 1 and parts[0] == u'Дата':
                    date, t = parts[1].split(',')  # d u' 30-09-2012' unicode
                    from datetime import datetime, timedelta
                    day = timedelta(1)
                    yesterday = datetime.today() - day
                    #date = ' 30-09-2012'
                    if u'Сегодня' in date:
                        d = datetime.today()
                    elif u'Вчера' in date:
                        d = yesterday
                    else:
                        try:
                            d = datetime.strptime(date.strip(), '%d-%m-%Y')
                        except TypeError:
                            d = datetime.today()
                    dt = datetime.today() - d
                    return dt

    #==============================================================================================
    def parse(self):
        tag = u''
        self._dict['gold'] = False
        self._dict['season'] = 1
        for title in self.soup.select('#news-title'):
            full_title = title.get_text()
            debug(full_title)
            self._dict['title'] = self.get_title(full_title)
            self._dict['originaltitle'] = self.get_original_title(full_title)
            self.parse_season_from_title(full_title)
            self._dict['episodes'] = self.get_episodes_num(full_title)

        for b in self.soup.select('div.xfinfodata b'):
            try:
                text = b.get_text()
                tag = self.get_tag(text)
                if tag != '':
                    span = b.find_next_sibling('span')
                    self._dict[tag] = span.get_text().strip()
            except:
                pass

        for div in self.soup.select('div.story_c'):
            try:
                text = div.get_text()
                text = text.split(u'Описание:')[1]
                text = text.split(u'Эпизоды')[0]
                text = text.split(u'Скриншоты')[0]
                text = text.strip()
                self._dict['plot'] = text
                #debug('---')
                #debug(text)
                #debug('---')
            except:
                pass

        for b in self.soup.select('div.story_h .rcol sup b'):
            try:
                text = b.get_text()
                text = text.split(' ')[0]
                self._dict['rating'] = float(text) * 2
                debug('rating: ' + str(self._dict['rating']))
            except:
                pass

        for img in self.soup.select('span.poster img'):
            try:
                self._dict['thumbnail'] = img['src'].strip()
                debug(self._dict['thumbnail'])
            except:
                pass

        fanart = []
        for a in self.soup.select('ul.clr li a'):
            try:
                debug(a['href'])
                fanart.append(a['href'].strip())
            except:
                pass
        if len(fanart) != 0:
            self._dict['fanart'] = fanart
        # else:
        #     dt = self.date_added_duration()
        #     if dt and dt.days <= 14:
        #         return False

        for img in self.soup.select('div.video_info a img'):
            try:
                self._dict['studio'] = img['alt'].strip()
                debug(self._dict['studio'])
            except:
                pass

        tags = []
        for a in self.soup.select('a[href*="https://tr.anidub.com/tags/"]'):
            tags.append(a.get_text().strip())
        if len(tags) > 0:
            self._dict['tag'] = tags

        return True

###################################################################################################
def write_tvshow_nfo(parser, tvshow_api, tvshow_path):
    try:
        if write_tvshow_nfo.favorites:
            parser.Dict().get('tag', []).append('favorites')
    except:
        pass

    NFOWriter(parser, tvshow_api=tvshow_api).write_tvshow_nfo(tvshow_path)
    return

###################################################################################################
def write_tvshow(content, path, settings):
    with filesystem.save_make_chdir_context(path, 'Anidub.write_tvshow'):
        d = feedparser.parse(content)

        cnt = 0
        settings.progress_dialog.update(0, 'anidub', path)

        for item in d.entries:
            write_tvshow_item(item, path, settings)

            cnt += 1
            settings.progress_dialog.update(cnt * 100 / len(d.entries), 'anidub', path)

def write_tvshow_item(item, path, settings, path_out=[]):
    debug('-------------------------------------------------------------------------')
    debug(item.link)

    parser = DescriptionParser(item.link)

    if parser.parsed():
        title = parser.get_value('title')
        debug(title)
        originaltitle = parser.get_value('originaltitle')
        debug(originaltitle)
        season = parser.get_value('season')

        from downloader import TorrentDownloader
        TorrentDownloader(item.link, settings.torrents_path(), settings).download()

        debug('Episodes: ' + str(parser.get_value('episodes')))

        tvshow_path = make_fullpath(title, '')
        tvshow_path = filesystem.join(path, tvshow_path)
        debug(tvshow_path)
        path_out.append(tvshow_path)

        settings.update_paths.add(tvshow_path)
        with filesystem.save_make_chdir_context(tvshow_path, 'Anidub.write_tvshow_item'):
            tvshow_api = TVShowAPI.get_by(originaltitle, title)
            write_tvshow_nfo(parser, tvshow_api, tvshow_path)

            season_path = filesystem.join(tvshow_path, u'Season ' + unicode(season))
            debug(season_path)

            with filesystem.save_make_chdir_context(season_path, 'Anidub.write_tvshow_item_2'):
                episodes = tvshow_api.episodes(season)

                if len(episodes) < parser.get_value('episodes'):
                    for i in range(len(episodes) + 1, parser.get_value('episodes') + 1):
                        episodes.append({
                            'title': title,
                            'showtitle': title,
                            'short': 's%02de%02d' % (season, i),
                            'episode': i,
                            'season': season
                        })

                for episode in episodes:
                    title = episode['title']
                    shortName = episode['short']
                    episodeNumber = episode['episode']
                    if episodeNumber <= parser.get_value('episodes'):
                        filename = str(episodeNumber) + '. ' + 'episode_' + shortName
                        debug(filename)

                        ep = tvshow_api.Episode(season, episodeNumber)
                        if ep:
                            episode = ep

                        STRMWriter(item.link).write(filename, season_path, episodeNumber=episodeNumber, settings=settings)
                        NFOWriter(parser, tvshow_api=tvshow_api).write_episode(episode, filename, season_path)
    else:
        skipped(item)

    del parser

def get_session(settings):
    s = requests.Session()
    data = {"login_name": settings.anidub_login, "login_password": settings.anidub_password, "login": "submit"}
    headers = {
        'Host': 'tr.anidub.com',
        'Origin': 'https://tr.anidub.com',
        'Referer': 'https://tr.anidub.com/',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132'
    }
    login = s.post("https://tr.anidub.com/", data=data, headers=headers)
    debug('Login status: %d' % login.status_code)
    if 'login_name' in login.content:
        debug('Login failed')
    return s

def download_torrent(url, path, settings):
    from base import save_hashes
    save_hashes(path)

    url = urllib2.unquote(url)
    debug('download_torrent:' + url)

    s = get_session(settings)
    page = s.get(url)
    #debug(page.text.encode('utf-8'))
    soup = BeautifulSoup(page.text, 'html.parser')
    try:
        a = soup.select_one('#tv720 div.torrent_h a')
    except TypeError:
        a = None

    try:
        if a is None:
            a = soup.select_one('div.torrent_h > a')
    except TypeError:
        a = None

    if a is not None:
        href = 'https://tr.anidub.com' + a['href']
        debug(s.headers)
        r = s.get(href, headers={'Referer': url})
        debug(r.headers)

        if 'Content-Type' in r.headers:
            if not 'torrent' in r.headers['Content-Type']:
                return False

        try:
            with filesystem.fopen(path, 'wb') as torr:
                for chunk in r.iter_content(100000):
                    torr.write(chunk)

            save_hashes(path)
            return True
        except:
            pass

    return False

def write_pages(url, path, settings, params={}, filter_fn=None, dialog_title=None, path_out=[]):
    s = get_session(settings)
    if params:
        page = s.post(url, data=params)
    else:
        page = s.get(url)
    soup = BeautifulSoup(page.content, 'html.parser')
    page_no = 1
    cnt = 0

    class Item:
        def __init__(self, link, title):
            self.link = link
            self.title = title

    with filesystem.save_make_chdir_context(path, 'Anidub.write_pages'):
        while True:
            if params:
                selector = soup.select('div.search_post > div.text > h2 > a')
            else:
                selector = soup.select('article.story > div.story_h > div.lcol > h2 > a')

            if not selector:
                break

            settings.progress_dialog.update(0, dialog_title, path)

            for a in selector:
                log.debug(a['href'])
                link = a['href']
                title = a.get_text()

                if filter_fn and filter_fn(title):
                    continue

                write_tvshow_item(Item(link, title), path, settings, path_out)

                cnt += 1
                settings.progress_dialog.update(cnt * 100 / len(selector), dialog_title, path)

            if not 'favorites' in url:
                break

            page_no += 1
            page = s.get(url + 'page/%d/' % page_no)
            if page.status_code == requests.codes.ok:
                soup = BeautifulSoup(page.text, 'html.parser')
            else:
                break

    return cnt

def write_favorites(path, settings):
    write_pages('https://tr.anidub.com/favorites/', path, settings, dialog_title=u'Избранное AniDUB')

def search_generate(what, settings, path_out):
    def filter(title):
        if what not in title:
            return True
        return False

    write_tvshow_nfo.favorites = False
    return write_pages('https://tr.anidub.com/index.php?do=search',
                       settings.anime_tvshow_path(), settings,
                       {'do': 'search', 'subaction': 'search', 'story': what.encode('utf-8')},
                       filter, dialog_title=u'Поиск AniDUB', path_out=path_out)

###################################################################################################
def run(settings):
    if settings.anime_save:
        if settings.anidub_rss:
            write_tvshow_nfo.favorites = False
            write_tvshow(settings.anidub_url, settings.anime_tvshow_path(), settings)
        if settings.anidub_favorite:
            write_tvshow_nfo.favorites = True
            write_favorites(settings.anime_tvshow_path(), settings)

if __name__ == '__main__':
    settings = Settings('../media_library')
    run(settings)
```
license: gpl-3.0
hash: 2,000,693,373,387,839,200
line_mean: 26.610619
line_max: 118
alpha_frac: 0.567228
autogenerated: false
ratio: 3.073892
config_test: false
has_no_keywords: false
few_assignments: false

repo_name: robdobsn/AmazonEchoShopping
path: WaitroseService/WaitroseScraper.py
copies: 1
size: 20691
content:

```python
# Waitrose web scraper
__author__ = 'robdobsn'

from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import selenium.webdriver.support.ui as webdriverui
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException, WebDriverException, TimeoutException
from selenium.webdriver.support import expected_conditions as EC
from bs4 import BeautifulSoup
import logging
import json
import re

class WaitroseScraper():
    def __init__(self):
        logging.info("Waitrose scraper starting")
        self.isInitalized = False
        self.isLoggedIn = False
        self.webDriverType = "PhantomJS"
        self.execUsingJS = False

    def clickButtonByClassName(self, className):
        if self.execUsingJS:
            self.webDriver.execute_script("document.getElementsByClassName('" + className + "')[0].click()")
        else:
            btn = self.webDriver.find_element_by_class_name(className)
            btn.click()

    def clickButtonByXPath(self, xpath):
        if self.execUsingJS:
            self.webDriver.execute_script("return document.evaluate('" + xpath + "', document, null, XPathResult.FIRST_ORDERED_NODE_TYPE, null).singleNodeValue.click()")
        else:
            btn = self.webDriver.find_element_by_xpath(xpath)
            btn.click()

    def clickButtonByCSSSelector(self, cssSelector):
        btn = self.webDriver.find_element_by_css_selector(cssSelector)
        btn.click()

    def checkButtonEnabledByCSSSelector(self, cssSelector):
        btn = self.webDriver.find_element_by_css_selector(cssSelector)
        return btn.is_enabled() and btn.is_displayed()

    def sendKeysToFieldById(self, elemId, strToSend, pressEnterAfter, clearFirst):
        # if self.execUsingJS:
        #     self.webDriver.execute_script("document.getElementsByClassName('" + elemId + "').value = '" + strToSend)
        # else:
        print("Sending keys to elemId " + elemId + " keys = " + strToSend)
        field = self.webDriver.find_element_by_id(elemId)
        print(field)
        if (clearFirst):
            field.send_keys(Keys.CONTROL + "a")
            field.send_keys(Keys.DELETE)
        field.send_keys(strToSend + (Keys.RETURN if pressEnterAfter else ""))

    def debugDumpPageSource(self, filenameExtra=""):
        with open("debugPageSource" + filenameExtra + ".html", "w") as debugDumpFile:
            debugDumpFile.write(self.webDriver.page_source)
        self.webDriver.save_screenshot('debugPageImage.png')

    # Start the web driver (runs the browser)
    def startWebDriver(self):
        # Clear current session file info
        with open('browserSession.json', 'w') as outfile:
            json.dump({}, outfile)

        # Create WebDriver
        if self.webDriverType == "Chrome":
            try:
                self.webDriver = webdriver.Chrome()
            except WebDriverException:
                logging.error("startWebDriver() Chrome Failed to start")
                return False
        elif self.webDriverType == "Firefox":
            try:
                self.webDriver = webdriver.Firefox()
            except WebDriverException:
                logging.error("startWebDriver() Firefox Failed to start")
                return False
        elif self.webDriverType == "PhantomJS":
            try:
                self.webDriver = webdriver.PhantomJS()  # or add to your PATH
            except:
                try:
                    self.webDriver = webdriver.PhantomJS(
                        executable_path='C:\ProgramData\PhantomJS\bin')
                except:
                    try:
                        self.webDriver = webdriver.PhantomJS(
                            executable_path='/usr/local/lib/node_modules/phantomjs/lib/phantom/bin/phantomjs')
                    except:
                        try:
                            self.webDriver = webdriver.PhantomJS(
                                executable_path=r'C:\Users\rob_2\AppData\Roaming\npm\node_modules\phantomjs\lib\phantom\bin\phantomjs.exe')
                        except:
                            logging.error("Failed to load the PhantomJS webdriver")
                            return False

        # Set the window size (seems to be needed in phantomJS particularly
        # This is probably because the website responds in mobile mode?)
        self.webDriver.set_window_size(1280, 1024)

        # Save session info
        url = self.webDriver.command_executor._url
        session_id = self.webDriver.session_id
        with open('browserSession.json', 'w') as outfile:
            json.dump({"url": url, "session_id": session_id}, outfile)

        return True

    def websiteLogin(self, username, password, attemptIdx):
        try:
            self.webDriver.save_screenshot('debug1_' + str(attemptIdx) + '.png')
            logging.info("Waiting for signInRegister button")
            wait = WebDriverWait(self.webDriver, 30)
            wait.until(EC.visibility_of_element_located((By.CLASS_NAME, "js-sign-in-register")))
            logging.info("waitroseLogin() pressing signInRegister button")
            self.clickButtonByClassName('js-sign-in-register')
            self.webDriver.save_screenshot('debug2_' + str(attemptIdx) + '.png')
            try:
                print("Starting to wait for logon-email")
                wait = WebDriverWait(self.webDriver, 30)
                wait.until(EC.visibility_of_element_located((By.ID, "logon-email")))
                print("Finished waiting for logon-email")
                self.webDriver.save_screenshot('debug3_' + str(attemptIdx) + '.png')
                try:
                    logging.info("waitroseLogin() entering username")
                    self.debugDumpPageSource("contbutton")
                    self.sendKeysToFieldById('logon-email', username, False, True)
                    self.webDriver.save_screenshot('debug4_' + str(attemptIdx) + '.png')
                    # self.clickButtonByXPath("//input[@type='button' and @value='Continue']")
                    if (self.checkButtonEnabledByCSSSelector("input[value='Continue'][type='button']")):
                        self.clickButtonByCSSSelector("input[value='Continue'][type='button']")
                    try:
                        logging.info("waitroseLogin() waiting for logon-password visible")
                        wait = WebDriverWait(self.webDriver, 60)
                        wait.until(EC.visibility_of_element_located((By.ID, "logon-password")))
                        self.webDriver.save_screenshot('debug5_' + str(attemptIdx) + '.png')
                        try:
                            logging.info("waitroseLogin() entering password")
                            self.sendKeysToFieldById('logon-password', password, False, True)
                            # self.clickButtonById('logon-button-sign-in')
                            self.clickButtonByCSSSelector("input[value='Sign in'][type='button']")
                            self.webDriver.save_screenshot('debug6_' + str(attemptIdx) + '.png')
                            logging.info("waitroseLogin() waiting for trolley-total to be visible")
                            wait = WebDriverWait(self.webDriver, 60)
                            wait.until(EC.visibility_of_element_located((By.CLASS_NAME, "trolley-total")))
                            self.webDriver.save_screenshot('debug7_' + str(attemptIdx) + '.png')
                            elem2 = self.webDriver.find_element_by_class_name('trolley-total')
                            if elem2:
                                logging.info("waitroseLogin() basket found")
                            else:
                                logging.info("waitroseLogin() basket not found")
                            return True
                        except WebDriverException as err:
                            logging.error("waitroseLogin() Cannot find logon-password after wait " + err.msg)
                            self.debugDumpPageSource()
                    except WebDriverException as err:
                        logging.error("waitroseLogin() Cannot find logon-password field" + err.msg)
                        self.debugDumpPageSource()
                except WebDriverException as err:
                    logging.error("waitroseLogin() Error entering logon-email" + err.msg)
                    self.debugDumpPageSource()
            except WebDriverException as err:
                logging.error("waitroseLogin() Cannot find logon-email field" + err.msg)
                self.debugDumpPageSource()
        except WebDriverException as err:
            logging.error("waitroseLogin() Cannot find sign-in-register button" + err.msg)
            self.debugDumpPageSource()
        return False

    def getBasketSummary(self):
        basketSummary = {}
        # Ensure we wait until the trolley-total is visible
        try:
            wait = WebDriverWait(self.webDriver, 20)
            wait.until(EC.visibility_of_element_located((By.CLASS_NAME, "trolley-total")))
        except TimeoutException:
            logging.error("Get basket summary timeout exception")
            self.debugDumpPageSource()
            return None
        except WebDriverException:
            logging.error("Get basket summary webdriver element exception")
            self.debugDumpPageSource()
            return None
        # Get basket total price
        try:
            totalElem = self.webDriver.find_element_by_class_name('trolley-total')
            if totalElem:
                reTotalElem = re.search("([0-9]{1,4}\.[0-9]{2})", totalElem.text)
                if reTotalElem:
                    basketSummary["totalPrice"] = reTotalElem.group(1)
                    logging.info("waitrose: Basket: total=" + str(basketSummary["totalPrice"]))
            # Get number of basket items
            summaryElem = self.webDriver.find_element_by_class_name('trolley-summary')
            if summaryElem:
                reSummaryElem = re.search("([0-9]{1,4}) items", summaryElem.text)
                if reSummaryElem:
                    basketSummary["numItems"] = reSummaryElem.group(1)
                    logging.info("waitrose: Basket: num items=" + str(basketSummary["numItems"]))
        except WebDriverException:
            logging.error("waitrose: Get basket summary webdriver element exception")
            self.debugDumpPageSource()
            return None
        # Return info found
        return basketSummary

    def getElemAttrIfPresent(self, soup, elemName, className, subElem, attrName, regexReplace, destDict=None, dictName=None):
        rslt = ""
        try:
            el = soup.find(elemName, class_=className)
            if subElem != "":
                el = el.find(subElem)
            if attrName == "text":
                rslt = el.get_text()
            else:
                rslt = el[attrName]
            if regexReplace != "":
                rslt = re.sub(regexReplace, "", rslt)
            if destDict is not None:
                destDict[dictName] = rslt
        except WebDriverException:
            logging.error("waitrose: Error extracting element " + elemName + " " + className)
            self.debugDumpPageSource()
        except:
            logging.error("waitrose: Error (not webdriver) extracting element " + elemName + " " + className)
            self.debugDumpPageSource()
        return rslt

    def getShoppingItems(self, isTrolleyPage):
        # Make sure all items on the page are loaded - lazy loader
        try:
            self.debugDumpPageSource("m-product")
            webdriverui.WebDriverWait(self.webDriver, 10)\
                .until(EC.visibility_of_element_located((By.CLASS_NAME, "m-product")))
        except WebDriverException:
            logging.error("Wait for m-product webdriver element exception")
            return []

        productsFound = self.webDriver.find_elements_by_class_name("m-product")
        print("waitrose: Lazy loading products - currently " + str(len(productsFound)) + " found")
        numRepeats = 0
        if len(productsFound) > 10:
            while True:
                prevFound = len(productsFound)
                self.webDriver.execute_script("window.scrollBy(0,window.innerHeight)")
                productsFound = self.webDriver.find_elements_by_class_name("m-product")
                print("Loading products - currently " + str(len(productsFound)) + " found")
                if len(productsFound) <= prevFound:
                    numRepeats += 1
                    if numRepeats > 20:
                        break
                else:
                    numRepeats = 0
        print("Done lazy loading products " + str(len(productsFound)) + " found")

        # Go through items in the list on the current page
        shoppingItems = []
        for product in productsFound:
            # Get HTML for this product
            basketIt = {}
            el = product.get_attribute("innerHTML")
            productSoup = BeautifulSoup(el, "html.parser")
            # Extract some common details
            self.getElemAttrIfPresent(productSoup, "a", "m-product-open-details", "", "href", "", basketIt, "detailsHref")
            self.getElemAttrIfPresent(productSoup, "a", "m-product-open-details", "img", "src", "", basketIt, "imageSrc")
            self.getElemAttrIfPresent(productSoup, "div", "m-product-volume", "", "text", r"\W", basketIt, "productVolume")
            # Check if we are doing the trolley page - which has extra info like number of items ordered
            if isTrolleyPage:
                self.getElemAttrIfPresent(productSoup, "div", "m-product-title", "a", "text", "", basketIt, "productTitle")
                if not "productTitle" in basketIt or basketIt["productTitle"] == "":
                    self.getElemAttrIfPresent(productSoup, "a", "m-product-open-details", "img", "title", "", basketIt, "productTitle")
                self.getElemAttrIfPresent(productSoup, "div", "quantity-append", "input", "value", "", basketIt, "trolleyQuantity")
                self.getElemAttrIfPresent(productSoup, "p", "m-product-details", "span", "text", "", basketIt, "trolleyPrice")
                self.getElemAttrIfPresent(productSoup, "div", "m-product-details-container", "div", "data-price", "", basketIt, "price")
                self.getElemAttrIfPresent(productSoup, "div", "m-product-details-container", "div", "data-priceperkg", "", basketIt, "pricePerKg")
                self.getElemAttrIfPresent(productSoup, "div", "m-product-details-container", "div", "data-orderitemid", "", basketIt, "orderItemId")
                self.getElemAttrIfPresent(productSoup, "div", "m-product-details-container", "div", "data-producttype", "", basketIt, "productType")
                self.getElemAttrIfPresent(productSoup, "div", "m-product-details-container", "div", "data-productid", "", basketIt, "productId")
                self.getElemAttrIfPresent(productSoup, "div", "m-product-details-container", "div", "data-uom", "", basketIt, "UOM")
                self.getElemAttrIfPresent(productSoup, "div", "m-product-details-container", "div", "data-weighttype", "", basketIt, "weightType")
                self.getElemAttrIfPresent(productSoup, "div", "m-product-details-container", "div", "data-substitute", "", basketIt, "substitute")
            else:
                self.getElemAttrIfPresent(productSoup, "div", "m-product-price-container", "span", "text", "\W", basketIt, "price")
                self.getElemAttrIfPresent(productSoup, "a", "m-product-open-details", "", "text", "", basketIt, "productTitle")
                if not "productTitle" in basketIt or basketIt["productTitle"] == "":
                    self.getElemAttrIfPresent(productSoup, "a", "m-product-open-details", "img", "title", "", basketIt, "productTitle")
            # Check if the product at least has a title and only add to list if it does
            if not "productTitle" in basketIt or basketIt["productTitle"] == "":
                logging.error("Extract Shopping List: Failed to extract product name")
            else:
                shoppingItems.append(basketIt)
        return shoppingItems

    def getTrolleyContents(self):
        # Ensure we wait until the trolley-total is visible
        try:
            wait = WebDriverWait(self.webDriver, 20)
            wait.until(EC.visibility_of_element_located((By.CLASS_NAME, "trolley-total")))
        except WebDriverException:
            logging.error("Wait for Trolley-Total webdriver element exception")
            self.debugDumpPageSource()
            return None
        # Navigate to the basket contents
        try:
            self.clickButtonByXPath('//div[@class="mini-trolley"]//a')
            wait = WebDriverWait(self.webDriver, 30)
            wait.until(EC.visibility_of_element_located((By.ID, "my-trolley")))
        except NoSuchElementException:
            logging.error("Press view trolley button no such element")
            self.debugDumpPageSource()
            return None
        except WebDriverException:
            logging.error("Press view trolley button webdriver element exception")
            self.debugDumpPageSource()
            return None
        # Get the shopping items on the current page
        return self.getShoppingItems(True)

    def getFavourites(self):
        # Ensure we wait until the favourites is visible
        try:
            wait = WebDriverWait(self.webDriver, 20)
            wait.until(EC.visibility_of_element_located((By.CLASS_NAME, "js-navbar-favourites")))
        except WebDriverException:
            logging.error("Wait for favourites button webdriver element exception")
            self.debugDumpPageSource()
            return None
        # Navigate to the favourites
        try:
            FAVOURITES_BUTTON_XPATH = '//a[@class="js-navbar-favourites"]'
            elemBasketBtn = self.webDriver.find_element_by_xpath(FAVOURITES_BUTTON_XPATH)
            print(elemBasketBtn)
            elemBasketBtn.click()
            wait = WebDriverWait(self.webDriver, 60)
            wait.until(EC.visibility_of_element_located((By.CLASS_NAME, "products-grid")))
        except NoSuchElementException:
            logging.error("Press view favourites button no such element")
            self.debugDumpPageSource()
            return None
        except WebDriverException:
            logging.error("Press view favourites button webdriver element exception")
            self.debugDumpPageSource()
            return None
        # Get the shopping items on the current page
        return self.getShoppingItems(False)

    # Handle site login
    def siteLogin(self, siteUrl, username, password, titleMustContainStr):
        # Start webDriver
        if not self.startWebDriver():
            logging.error("Unable to start webdriver")
            return False
        self.isInitalized = True
        # Go to URL
        logging.info("Webdriver going to " + siteUrl)
        self.webDriver.get(siteUrl)
        logging.info("Webdriver site title = " + self.webDriver.title)
        if not titleMustContainStr in self.webDriver.title:
            logging.error("Site " + siteUrl + " title doesn't contain " + titleMustContainStr)
            self.debugDumpPageSource()
            return False
        # Handle login
        self.isLoggedIn = self.websiteLogin(username, password, 1)
        # Succeeded so far
        return self.isLoggedIn

    # Ensure that we are logged in
    def ensureLoggedIn(self, username, password):
        # Ensure we are initialised
        if not self.isInitalized:
            self.siteLogin("http://www.waitrose.com", username, password, "Waitrose")
        # Try to login again if not currently logged in
        if self.isInitalized:
            if not self.isLoggedIn:
                self.isLoggedIn = self.websiteLogin(username, password, 2)
        return self.isLoggedIn
```
license: isc
hash: -783,531,863,977,082,500
line_mean: 46.895833
line_max: 169
alpha_frac: 0.581654
autogenerated: false
ratio: 4.352335
config_test: false
has_no_keywords: false
few_assignments: false

repo_name: kidscancode/gamedev
path: pygame template.py
copies: 1
size: 1508
content:

```python
# Pygame Template
# Use this to start a new Pygame project
# KidsCanCode 2015
import pygame
import random

# define some colors (R, G, B)
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
BLACK = (0, 0, 0)
FUCHSIA = (255, 0, 255)
GRAY = (128, 128, 128)
LIME = (0, 128, 0)
MAROON = (128, 0, 0)
NAVYBLUE = (0, 0, 128)
OLIVE = (128, 128, 0)
PURPLE = (128, 0, 128)
RED = (255, 0, 0)
SILVER = (192, 192, 192)
TEAL = (0, 128, 128)
YELLOW = (255, 255, 0)
ORANGE = (255, 128, 0)
CYAN = (0, 255, 255)

# basic constants to set up your game
WIDTH = 360
HEIGHT = 480
FPS = 30
BGCOLOR = BLACK

# initialize pygame
pygame.init()
# initialize sound - uncomment if you're using sound
# pygame.mixer.init()
# create the game window and set the title
screen = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("My Game")
# start the clock
clock = pygame.time.Clock()

# set the 'running' variable to False to end the game
running = True
# start the game loop
while running:
    # keep the loop running at the right speed
    clock.tick(FPS)
    # Game loop part 1: Events #####
    for event in pygame.event.get():
        # this one checks for the window being closed
        if event.type == pygame.QUIT:
            pygame.quit()
        # add any other events here (keys, mouse, etc.)
    # Game loop part 2: Updates #####
    # Game loop part 3: Draw #####
    screen.fill(BGCOLOR)
    # after drawing, flip the display
    pygame.display.flip()

# close the window
pygame.quit()
```
license: mit
hash: 3,454,655,415,024,161,000
line_mean: 22.936508
line_max: 55
alpha_frac: 0.63992
autogenerated: false
ratio: 2.998012
config_test: false
has_no_keywords: false
few_assignments: false

repo_name: hhucn/git-vote
path: git-vote/__main__.py
copies: 1
size: 3022
content:

```python
import argparse
import collections
import re
import subprocess

NOTES_REF = 'refs/notes/votes'

Vote = collections.namedtuple('Vote', ['commit', 'user'])


def vote(args):
    assert args.user, 'TODO: determine user automatically'
    vote = 'vote:%s' % args.user
    subprocess.check_call([
        'git', 'notes', '--ref', NOTES_REF, 'append', '--allow-empty', '-m', vote, args.COMMIT],
        cwd=args.repo_dir)
    # TODO: prevent voting twice as same user


def get_all_votes(repo_dir):
    output_bytes = subprocess.check_output([
        'git', 'notes', '--ref', NOTES_REF, 'list'],
        cwd=repo_dir)
    output = output_bytes.decode('utf-8')
    for line in output.splitlines():
        if not line:
            continue
        votenote_ref, commit_id = line.split()
        # TODO use dulwich or something more efficient here
        votenote_bytes = subprocess.check_output(
            ['git', 'show', votenote_ref],
            cwd=repo_dir)
        votenote_content = votenote_bytes.decode('utf-8')  # TODO ignore invalid votes
        for voteline in votenote_content.splitlines():
            if not voteline:
                continue
            m = re.match(r'^vote:(?P<user>[a-z0-9@._]+)$', voteline.strip())  # TODO check re for user spec
            if not m:
                print('Skipping crap %r' % voteline)
                continue
            user = m.group('user')
            yield Vote(commit=commit_id, user=user)


def print_list(args):
    all_votes = get_all_votes(args.repo_dir)
    all_votes_sorted = sorted(all_votes, key=lambda v: (v.commit, v.user))
    for v in all_votes_sorted:
        print('%s: +1 from %s' % (v.commit, v.user))


def tally(all_votes):
    """ Returns a dict commit id => set of users """
    res = collections.defaultdict(set)
    for v in all_votes:
        res[v.commit].add(v.user)
    return res


def print_tally(args):
    all_votes = get_all_votes(args.repo_dir)
    for commit, votes in sorted(tally(all_votes).items(), key=lambda kv: (kv[1], kv[0])):
        print('%s: %d votes' % (commit, len(votes)))


def print_elect(args):
    all_votes = get_all_votes(args.repo_dir)
    winner_vcount, winner_commit = max((len(votes), commit) for commit, votes in tally(all_votes).items())
    # TODO more algorithms
    print('%s won the election with %d votes' % (winner_commit, winner_vcount))


def main():
    parser = argparse.ArgumentParser('Vote on git commands')
    parser.add_argument('-r', '--repo-dir', metavar='DIR', help='root directory of the repository to modify')
    subparsers = parser.add_subparsers(dest='cmd')
    vote_parser = subparsers.add_parser('vote', help='Vote for commit')
    vote_parser.add_argument('--user', metavar='USER_ID', help='ID of the user to vote as')
    vote_parser.add_argument('COMMIT', help='reference to the commit to vote for')
    subparsers.add_parser('list', help='List all votes')
    subparsers.add_parser('tally', help='Tally all votes')
    subparsers.add_parser('elect', help='Elect a commit')

    args = parser.parse_args()
    if args.cmd == 'vote':
        vote(args)
    elif args.cmd == 'list':
        print_list(args)
    elif args.cmd == 'tally':
        print_tally(args)
    elif args.cmd == 'elect':
        print_elect(args)
    else:
        parser.print_help()


if __name__ == '__main__':
    main()
```
apache-2.0
2,600,720,082,252,724,000
28.627451
106
0.676704
false
2.869896
false
false
false
kerimlcr/ab2017-dpyo
ornek/lollypop/lollypop-0.9.229/src/web.py
1
7411
# Copyright (c) 2014-2016 Cedric Bellegarde <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from gi.repository import GObject, GLib

from threading import Thread
from time import time

from lollypop.sqlcursor import SqlCursor
from lollypop.tagreader import TagReader
from lollypop.web_youtube import WebYouTube
from lollypop.web_jgm90 import WebJmg90
from lollypop.define import Lp, DbPersistent, Type
from lollypop.lio import Lio


class Web(GObject.Object):
    """
        Web helper
    """

    __gsignals__ = {
        'saved': (GObject.SignalFlags.RUN_FIRST, None, (int,)),
        'progress': (GObject.SignalFlags.RUN_FIRST, None, (float,))
    }

    def play_track(track, play, callback):
        """
            Play track
            @param track as Track
            @param play as bool
            @param callback as func(uri: str, track: Track, play: bool)
        """
        if track.is_jgm:
            uri = WebJmg90.get_uri_content(track.uri)
        elif track.is_youtube:
            uri = WebYouTube.get_uri_content(track.uri)
        else:
            return
        GLib.idle_add(callback, uri, track, play)

    def __init__(self):
        """
            Init helper
        """
        GObject.Object.__init__(self)
        self.__helpers = [WebJmg90(), WebYouTube()]

    def save_track(self, item, persistent):
        """
            Save item into collection as track
            @param item as SearchItem
            @param persistent as DbPersistent
        """
        t = Thread(target=self.__save_track_thread, args=(item, persistent))
        t.daemon = True
        t.start()

    def save_album(self, item, persistent):
        """
            Save item into collection as album
            @param item as SearchItem
            @param persistent as DbPersistent
        """
        t = Thread(target=self.__save_album_thread, args=(item, persistent))
        t.daemon = True
        t.start()

#######################
# PRIVATE             #
#######################
    def __save_album_thread(self, item, persistent):
        """
            Save item into collection as album
            @param item as SearchItem
            @param persistent as DbPersistent
        """
        nb_items = len(item.subitems)
        # Should not happen, but it does :-/
        if nb_items == 0:
            return
        start = 0
        album_artist = item.subitems[0].artists[0]
        album_id = None
        for track_item in item.subitems:
            (album_id, track_id) = self.__save_track(track_item, persistent,
                                                     album_artist)
            if track_id is None:
                continue
            # Download cover
            if start == 0:
                t = Thread(target=self.__save_cover, args=(item, album_id))
                t.daemon = True
                t.start()
            start += 1
            GLib.idle_add(self.emit, "progress", start / nb_items)
        GLib.idle_add(self.emit, "progress", 1)
        if Lp().settings.get_value('artist-artwork'):
            Lp().art.cache_artists_info()
        if album_id is not None:
            GLib.idle_add(self.emit, "saved", album_id)

    def __save_track_thread(self, item, persistent):
        """
            Save item into collection as track
            @param item as SearchItem
            @param persistent as DbPersistent
        """
        album_artist = item.artists[0]
        (album_id, track_id) = self.__save_track(item, persistent,
                                                 album_artist)
        if track_id is None:
            return
        self.__save_cover(item, album_id)
        if Lp().settings.get_value('artist-artwork'):
            Lp().art.cache_artists_info()
        GLib.idle_add(self.emit, "saved", track_id)

    def __save_track(self, item, persistent, album_artist):
        """
            Save item into collection as track
            @param item as SearchItem
            @param persistent as DbPersistent
            @param album artist as str
            @return (album id as int, track id as int)
        """
        # Get uri from helpers
        for helper in self.__helpers:
            uri = helper.get_uri(item)
            if uri:
                break
        # Didn't find anything
        if not uri:
            return (None, None)
        track_id = Lp().tracks.get_id_by_uri(uri)
        # Check if track needs to be updated
        if track_id is not None:
            if Lp().tracks.get_persistent(track_id) == DbPersistent.NONE\
                    and persistent == DbPersistent.EXTERNAL:
                Lp().tracks.set_persistent(track_id, DbPersistent.EXTERNAL)
                return (None, None)
        t = TagReader()
        with SqlCursor(Lp().db) as sql:
            # Happens often with iTunes/Spotify
            if album_artist not in item.artists:
                item.artists.append(album_artist)
            artists = "; ".join(item.artists)
            artist_ids = t.add_artists(artists, album_artist, "")
            album_artist_ids = t.add_album_artists(album_artist, "")
            (album_id, new_album) = t.add_album(item.album, album_artist_ids, "",
                                                False, 0, 0, int(time()), True)
            # FIXME: Check this, could move this in add_album()
            if new_album:
                Lp().albums.set_synced(album_id, Type.NONE)
            if persistent == DbPersistent.CHARTS:
                genre_ids = [Type.CHARTS]
                new_artist_ids = []
            else:
                new_artist_ids = list(set(artist_ids) | set(album_artist_ids))
                genre_ids = t.add_genres("Web", album_id)
            # Add track to db
            track_id = Lp().tracks.add(item.name, uri, item.duration,
                                       0, item.discnumber, "", album_id,
                                       item.year, 0, 0, 0, persistent)
            t.update_track(track_id, artist_ids, genre_ids)
            t.update_album(album_id, album_artist_ids, genre_ids, None)
            sql.commit()
        for genre_id in genre_ids:
            GLib.idle_add(Lp().scanner.emit, 'genre-updated', genre_id, True)
        for artist_id in new_artist_ids:
            GLib.idle_add(Lp().scanner.emit, 'artist-updated', artist_id, True)
        return (album_id, track_id)

    def __save_cover(self, item, album_id):
        """
            Save cover to store
            @param item as SearchItem
            @param album id as int
        """
        f = Lio.File.new_for_uri(item.cover)
        (status, data, tag) = f.load_contents(None)
        if status:
            Lp().art.save_album_artwork(data, album_id)
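# ------------------------------------------------------------------
# Editor's sketch (not part of the original module): a hypothetical way
# to use Web from application code; `search_item` stands in for a
# SearchItem built elsewhere, and the signal name comes from
# __gsignals__ above.
def _example_save(search_item):
    def on_saved(web, object_id):
        print("saved with id", object_id)

    web = Web()
    web.connect("saved", on_saved)
    web.save_track(search_item, DbPersistent.EXTERNAL)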
gpl-3.0
2,696,436,820,573,915,000
36.619289
79
0.54716
false
4.005946
false
false
false
pybel/pybel
src/pybel/io/nodelink.py
1
7238
# -*- coding: utf-8 -*-

"""Conversion functions for BEL graphs with node-link JSON."""

import gzip
import json
from io import BytesIO
from itertools import chain, count
from operator import methodcaller
from typing import Any, Mapping, TextIO, Union

from networkx.utils import open_file

from .utils import ensure_version
from ..constants import (
    ANNOTATIONS, CITATION, FUSION, GRAPH_ANNOTATION_CURIE, GRAPH_ANNOTATION_LIST, GRAPH_ANNOTATION_MIRIAM,
    MEMBERS, PARTNER_3P, PARTNER_5P, PRODUCTS, REACTANTS, SOURCE_MODIFIER, TARGET_MODIFIER,
)
from ..dsl import BaseEntity
from ..language import citation_dict
from ..struct import BELGraph
from ..struct.graph import _handle_modifier
from ..tokens import parse_result_to_dsl
from ..utils import hash_edge, tokenize_version

__all__ = [
    'to_nodelink',
    'to_nodelink_file',
    'to_nodelink_gz',
    'to_nodelink_jsons',
    'from_nodelink',
    'from_nodelink_file',
    'from_nodelink_gz',
    'from_nodelink_jsons',
    'to_nodelink_gz_io',
    'from_nodelink_gz_io',
]


def to_nodelink(graph: BELGraph) -> Mapping[str, Any]:
    """Convert this graph to a node-link JSON object.

    :param graph: BEL Graph
    """
    graph_json_dict = _to_nodelink_json_helper(graph)
    _prepare_graph_dict(graph_json_dict['graph'])
    return graph_json_dict


def _prepare_graph_dict(g):
    # Convert annotation list definitions (which are sets) to canonicalized/sorted lists
    g[GRAPH_ANNOTATION_LIST] = {
        keyword: list(sorted(values))
        for keyword, values in g.get(GRAPH_ANNOTATION_LIST, {}).items()
    }
    g[GRAPH_ANNOTATION_CURIE] = list(sorted(g[GRAPH_ANNOTATION_CURIE]))
    g[GRAPH_ANNOTATION_MIRIAM] = list(sorted(g[GRAPH_ANNOTATION_MIRIAM]))


@open_file(1, mode='w')
def to_nodelink_file(graph: BELGraph, path: Union[str, TextIO], **kwargs) -> None:
    """Write this graph as node-link JSON to a file.

    :param graph: A BEL graph
    :param path: A path or file-like
    """
    graph_json_dict = to_nodelink(graph)
    json.dump(graph_json_dict, path, ensure_ascii=False, **kwargs)


def to_nodelink_gz(graph, path: str, **kwargs) -> None:
    """Write a graph as node-link JSON to a gzip file."""
    with gzip.open(path, 'wt') as file:
        json.dump(to_nodelink(graph), file, ensure_ascii=False, **kwargs)


def to_nodelink_jsons(graph: BELGraph, **kwargs) -> str:
    """Dump this graph as a node-link JSON object to a string."""
    return json.dumps(to_nodelink(graph), ensure_ascii=False, **kwargs)


def from_nodelink(graph_json_dict: Mapping[str, Any], check_version: bool = True) -> BELGraph:
    """Build a graph from node-link JSON Object."""
    pybel_version = tokenize_version(graph_json_dict['graph']['pybel_version'])
    if pybel_version[1] < 14:  # if minor version is less than 14
        raise ValueError('Invalid NodeLink JSON from old version of PyBEL (v{}.{}.{})'.format(*pybel_version))
    graph = _from_nodelink_json_helper(graph_json_dict)
    return ensure_version(graph, check_version=check_version)


@open_file(0, mode='r')
def from_nodelink_file(path: Union[str, TextIO], check_version: bool = True) -> BELGraph:
    """Build a graph from the node-link JSON contained in the given file.

    :param path: A path or file-like
    """
    return from_nodelink(json.load(path), check_version=check_version)


def from_nodelink_gz(path: str) -> BELGraph:
    """Read a graph as node-link JSON from a gzip file."""
    with gzip.open(path, 'rt') as file:
        return from_nodelink(json.load(file))


def from_nodelink_jsons(graph_json_str: str, check_version: bool = True) -> BELGraph:
    """Read a BEL graph from a node-link JSON string."""
    return from_nodelink(json.loads(graph_json_str), check_version=check_version)


def _to_nodelink_json_helper(graph: BELGraph) -> Mapping[str, Any]:
    """Convert a BEL graph to a node-link format.

    :param graph: BEL Graph

    Adapted from :func:`networkx.readwrite.json_graph.node_link_data`
    """
    nodes = sorted(graph, key=methodcaller('as_bel'))
    mapping = dict(zip(nodes, count()))

    return {
        'directed': True,
        'multigraph': True,
        'graph': graph.graph.copy(),
        'nodes': [
            _augment_node(node)
            for node in nodes
        ],
        'links': [
            dict(
                chain(
                    data.copy().items(),
                    [('source', mapping[u]), ('target', mapping[v]), ('key', key)],
                ),
            )
            for u, v, key, data in graph.edges(keys=True, data=True)
        ],
    }


def _augment_node(node: BaseEntity) -> BaseEntity:
    """Add the SHA-512 identifier to a node's dictionary."""
    rv = node.copy()
    rv['id'] = node.md5
    rv['bel'] = node.as_bel()

    for m in chain(node.get(MEMBERS, []), node.get(REACTANTS, []), node.get(PRODUCTS, [])):
        m.update(_augment_node(m))

    if FUSION in node:
        node[FUSION][PARTNER_3P].update(_augment_node(node[FUSION][PARTNER_3P]))
        node[FUSION][PARTNER_5P].update(_augment_node(node[FUSION][PARTNER_5P]))

    return rv


def _recover_graph_dict(graph: BELGraph):
    graph.graph[GRAPH_ANNOTATION_LIST] = {
        keyword: set(values)
        for keyword, values in graph.graph.get(GRAPH_ANNOTATION_LIST, {}).items()
    }
    graph.graph[GRAPH_ANNOTATION_CURIE] = set(graph.graph.get(GRAPH_ANNOTATION_CURIE, []))
    graph.graph[GRAPH_ANNOTATION_MIRIAM] = set(graph.graph.get(GRAPH_ANNOTATION_MIRIAM, []))


def _from_nodelink_json_helper(data: Mapping[str, Any]) -> BELGraph:
    """Return graph from node-link data format.

    Adapted from :func:`networkx.readwrite.json_graph.node_link_graph`
    """
    graph = BELGraph()
    graph.graph = data.get('graph', {})
    _recover_graph_dict(graph)

    mapping = []

    for node_data in data['nodes']:
        node = parse_result_to_dsl(node_data)
        graph.add_node_from_data(node)
        mapping.append(node)

    for data in data['links']:
        u = mapping[data['source']]
        v = mapping[data['target']]

        edge_data = {
            k: v
            for k, v in data.items()
            if k not in {'source', 'target', 'key'}
        }

        for side in (SOURCE_MODIFIER, TARGET_MODIFIER):
            side_data = edge_data.get(side)
            if side_data:
                _handle_modifier(side_data)

        if CITATION in edge_data:
            edge_data[CITATION] = citation_dict(**edge_data[CITATION])

        if ANNOTATIONS in edge_data:
            edge_data[ANNOTATIONS] = graph._clean_annotations(edge_data[ANNOTATIONS])

        graph.add_edge(u, v, key=hash_edge(u, v, edge_data), **edge_data)

    return graph


def to_nodelink_gz_io(graph: BELGraph) -> BytesIO:
    """Get a BEL graph as a compressed BytesIO."""
    bytes_io = BytesIO()
    with gzip.GzipFile(fileobj=bytes_io, mode='w') as file:
        s = to_nodelink_jsons(graph)
        file.write(s.encode('utf-8'))
    bytes_io.seek(0)
    return bytes_io


def from_nodelink_gz_io(bytes_io: BytesIO) -> BELGraph:
    """Get BEL from gzipped nodelink JSON."""
    with gzip.GzipFile(fileobj=bytes_io, mode='r') as file:
        s = file.read()
        j = s.decode('utf-8')
        return from_nodelink_jsons(j)
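# ------------------------------------------------------------------
# Editor's sketch (not part of the original module): a round-trip through
# the string functions above. Assumes pybel.dsl.Protein and
# BELGraph.add_increases are available, as in recent PyBEL releases.
def _roundtrip_example():
    from ..dsl import Protein

    graph = BELGraph(name='example', version='0.0.1')
    graph.add_increases(
        Protein(namespace='HGNC', name='A'),
        Protein(namespace='HGNC', name='B'),
        citation='12345',
        evidence='example sentence',
    )
    reloaded = from_nodelink_jsons(to_nodelink_jsons(graph))
    assert reloaded.number_of_nodes() == 2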
mit
2,647,336,991,651,230,700
31.168889
115
0.635811
false
3.235583
false
false
false
Mozu/mozu-python-sdk
mozurestsdk/platform/tenantextensions.py
1
2242
""" This code was generated by Codezu. Changes to this file may cause incorrect behavior and will be lost if the code is regenerated. """ from mozurestsdk.mozuclient import default as default_client from mozurestsdk.mozuurl import MozuUrl; from mozurestsdk.urllocation import UrlLocation from mozurestsdk.apicontext import ApiContext; class TenantExtensions(object): def __init__(self, apiContext: ApiContext = None, mozuClient = None): self.client = mozuClient or default_client(); if (apiContext is not None): self.client.withApiContext(apiContext); else: self.client.withApiContext(ApiContext()); def getExtensions(self,responseFields = None): """ Retrieves the Arc.js configuration settings for a site. Args: | responseFields (string) - Filtering syntax appended to an API call to increase or decrease the amount of data returned inside a JSON object. This parameter should only be used to retrieve data. Attempting to update data using this parameter may cause data loss. Returns: | TenantExtensions Raises: | ApiException """ url = MozuUrl("/api/platform/extensions/?responseFields={responseFields}", "GET", UrlLocation.TenantPod, False); url.formatUrl("responseFields", responseFields); self.client.withResourceUrl(url).execute(); return self.client.result(); def updateExtensions(self,extensions, responseFields = None): """ Updates the Arc.js configuration settings for a site. Args: | extensions(extensions) - The updated details of the Arc.js configuration settings. | responseFields (string) - Filtering syntax appended to an API call to increase or decrease the amount of data returned inside a JSON object. This parameter should only be used to retrieve data. Attempting to update data using this parameter may cause data loss. Returns: | TenantExtensions Raises: | ApiException """ url = MozuUrl("/api/platform/extensions/?responseFields={responseFields}", "PUT", UrlLocation.TenantPod, False); url.formatUrl("responseFields", responseFields); self.client.withResourceUrl(url).withBody(extensions).execute(); return self.client.result();
apache-2.0
-7,380,671,656,172,497,000
32.181818
266
0.727029
false
3.872193
false
false
false
nigelb/SerialGrabber
examples/MQTT/SerialGrabber_Settings.py
1
2043
#!/usr/bin/env python
# SerialGrabber reads data from a serial port and processes it with the
# configured processor.
# Copyright (C) 2012  NigelB
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import serial
from serial_grabber.extractors import TransactionExtractor
from serial_grabber.reader.SerialReader import SerialReader
from serial_grabber.processor.UploadProcessor import UploadProcessor
from serial_grabber.processor import CompositeProcessor
from serial_grabber.mqtt import MqttCommander
from serial_grabber.connections import SerialConnection

# Serial Settings
timeout = 1
port = "/dev/ttyUSB0"
baud = 57600
parity = serial.PARITY_NONE
stop_bits = 1

# MQTT settings
mqtt_host = "localhost"
mqtt_port = 1883
mqtt_auth = ('system', 'manager')

# Settings
cache_collision_avoidance_delay = 1
processor_sleep = 1
watchdog_sleep = 1
reader_error_sleep = 1
drop_carriage_return = True

transaction = TransactionExtractor("default", "BEGIN DATA", "END DATA")

reader = SerialReader(transaction, 1000,
                      SerialConnection(port, baud, timeout=timeout,
                                       parity=parity, stop_bits=stop_bits))

commander = MqttCommander(mqtt_host, mqtt_port, mqtt_auth)

uploadProcessor = UploadProcessor("https://example.org/cgi-bin/upload.py")
processor = CompositeProcessor([commander.processor, uploadProcessor])
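# ------------------------------------------------------------------
# Editor's note (not part of the original settings file): with the
# TransactionExtractor above, the reader captures everything between the
# two boundary markers, so a device would emit payloads shaped like this
# (illustrative example only):
#
#   BEGIN DATA
#   temperature=21.4
#   humidity=55
#   END DATA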
gpl-2.0
1,560,382,958,900,706,000
33.05
75
0.751836
false
3.87666
false
false
false
logicabrity/aeon
test/test_measurement.py
1
1229
import time

import pytest

from aeon.measurement import Measurement
from aeon.errors import InvalidMeasurementState


def test_cant_start_measurement_twice():
    m = Measurement("name", "group")
    m.start()
    with pytest.raises(InvalidMeasurementState):
        m.start()


def test_cant_stop_measurement_before_starting_it():
    m = Measurement("name", "group")
    with pytest.raises(InvalidMeasurementState):
        m.stop()


def test_cant_stop_measurement_twice():
    m = Measurement("name", "group")
    m.start()
    m.stop()
    with pytest.raises(InvalidMeasurementState):
        m.stop()


def test_starting_measurement_increases_number_of_calls():
    m = Measurement("name", "group")
    assert m.calls == 0
    m.start()
    assert m.calls == 1


def test_measurement_measures_something():
    m = Measurement("name", "group")
    m.start()
    time.sleep(1e-3)
    m.stop()
    elapsed = m.total_runtime
    assert elapsed > 0

    m.start()
    time.sleep(1e-3)
    m.stop()
    elapsed_again = m.total_runtime
    assert elapsed_again > elapsed


@pytest.mark.fixed
def test_measurement_has_name_and_group():
    m = Measurement("name", "group")
    assert m.name == "name"
    assert m.group == "group"
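# ------------------------------------------------------------------
# Editor's sketch (not a test from the original suite): the usage pattern
# the tests above exercise — repeated start/stop cycles accumulate into
# total_runtime and increment calls.
def example_timing_loop():
    m = Measurement("io", "disk")
    for _ in range(3):
        m.start()
        time.sleep(1e-3)  # stand-in for the code being measured
        m.stop()
    assert m.calls == 3
    assert m.total_runtime > 0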
mit
-1,662,716,423,659,156,000
21.345455
58
0.656631
false
3.385675
true
false
false
maas/maas
src/maasserver/middleware.py
1
18463
# Copyright 2012-2016 Canonical Ltd.  This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).

"""Access middleware."""

import http.client
import json
import logging
from pprint import pformat
import sys
import traceback

import attr
from crochet import TimeoutError
from django.conf import settings
from django.core.exceptions import PermissionDenied, ValidationError
from django.core.handlers.exception import get_exception_response
from django.http import (
    Http404,
    HttpResponse,
    HttpResponseBadRequest,
    HttpResponseForbidden,
    HttpResponseRedirect,
)
from django.urls import get_resolver, get_urlconf, reverse
from django.utils.encoding import force_str
from django.utils.http import urlquote_plus
from maasserver import logger
from maasserver.clusterrpc.utils import get_error_message_for_exception
from maasserver.components import (
    discard_persistent_error,
    register_persistent_error,
)
from maasserver.enum import COMPONENT
from maasserver.exceptions import MAASAPIException
from maasserver.models.config import Config
from maasserver.models.node import RackController
from maasserver.rbac import rbac
from maasserver.rpc import getAllClients
from maasserver.utils.orm import is_retryable_failure
from provisioningserver.rpc.exceptions import (
    NoConnectionsAvailable,
    PowerActionAlreadyInProgress,
)
from provisioningserver.utils.shell import ExternalProcessError

# 'Retry-After' header sent for httplib.SERVICE_UNAVAILABLE
# responses.
RETRY_AFTER_SERVICE_UNAVAILABLE = 10

PUBLIC_URL_PREFIXES = [
    # Login page: must be visible to anonymous users.
    reverse("login"),
    # Authentication: must be visible to anonymous users.
    reverse("authenticate"),
    reverse("discharge-request"),
    # CSRF: only usable by logged in users, but returns FORBIDDEN instead of
    # a redirect to the login page on request of an unauthenticated user.
    reverse("csrf"),
    # The combo loaders are publicly accessible.
    reverse("robots"),
    # Metadata service is for use by nodes; no login.
    reverse("metadata"),
    # RPC information is for use by rack controllers; no login.
    reverse("rpc-info"),
    # Prometheus metrics with usage stats
    reverse("metrics"),
    # API meta-information is publicly visible.
    reverse("api_version"),
    reverse("api_v1_error"),
    # API calls are protected by piston.
    settings.API_URL_PREFIX,
    # Boot resources simple streams endpoint; no login.
    settings.SIMPLESTREAMS_URL_PREFIX,
]


def is_public_path(path):
    """Whether a request.path is publicly accessible."""
    return any(path.startswith(prefix) for prefix in PUBLIC_URL_PREFIXES)


class AccessMiddleware:
    """Protect access to views.

    Most UI views are visible only to logged-in users, but there are pages
    that are accessible to anonymous users (e.g. the login page!) or that
    use other authentication (e.g. the MAAS API, which is managed through
    piston).
    """

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        if is_public_path(request.path):
            return self.get_response(request)

        if request.user.is_anonymous:
            return HttpResponseRedirect(
                "/MAAS/?next=%s" % urlquote_plus(request.path)
            )

        return self.get_response(request)


class ExternalComponentsMiddleware:
    """Middleware to check external components at regular intervals."""

    def __init__(self, get_response):
        self.get_response = get_response

    def _check_rack_controller_connectivity(self):
        """Check each rack controller to see if it's connected.

        If any rack controllers are disconnected, add a persistent error.
        """
        controllers = RackController.objects.all()
        connected_ids = {client.ident for client in getAllClients()}
        disconnected_controllers = {
            controller
            for controller in controllers
            if controller.system_id not in connected_ids
        }
        if len(disconnected_controllers) == 0:
            discard_persistent_error(COMPONENT.RACK_CONTROLLERS)
        else:
            if len(disconnected_controllers) == 1:
                message = (
                    "One rack controller is not yet connected to the region"
                )
            else:
                message = (
                    "%d rack controllers are not yet connected to the region"
                    % len(disconnected_controllers)
                )
            message = (
                '%s. Visit the <a href="/MAAS/l/controllers">'
                "rack controllers page</a> for "
                "more information." % message
            )
            register_persistent_error(COMPONENT.RACK_CONTROLLERS, message)

    def __call__(self, request):
        # This middleware hijacks the request to perform checks.  Any
        # error raised during these checks should be caught to avoid
        # disturbing the handling of the request.  Proper error reporting
        # should be handled in the check method itself.
        self._check_rack_controller_connectivity()
        return self.get_response(request)


class ExceptionMiddleware:
    """Convert exceptions into appropriate HttpResponse responses.

    For example, a MAASAPINotFound exception processed by a middleware
    based on this class will result in an http 404 response to the client.
    Validation errors become "bad request" responses.

    .. middleware: https://docs.djangoproject.com
       /en/dev/topics/http/middleware/
    """

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        try:
            return self.get_response(request)
        except Exception as exception:
            response = self.process_exception(request, exception)
            if response:
                return response
            else:
                raise

    def process_exception(self, request, exception):
        encoding = "utf-8"
        if isinstance(exception, MAASAPIException):
            # Print a traceback if this is a 500 error.
            if (
                settings.DEBUG
                or exception.api_error == http.client.INTERNAL_SERVER_ERROR
            ):
                self.log_exception(exception)
            # This type of exception knows how to translate itself into
            # an http response.
            return exception.make_http_response()
        elif isinstance(exception, ValidationError):
            if settings.DEBUG:
                self.log_exception(exception)
            if hasattr(exception, "message_dict"):
                # Complex validation error with multiple fields:
                # return a json version of the message_dict.
                return HttpResponseBadRequest(
                    json.dumps(exception.message_dict),
                    content_type="application/json",
                )
            else:
                # Simple validation error: return the error message.
                return HttpResponseBadRequest(
                    str("".join(exception.messages)).encode(encoding),
                    content_type="text/plain; charset=%s" % encoding,
                )
        elif isinstance(exception, PermissionDenied):
            if settings.DEBUG:
                self.log_exception(exception)
            return HttpResponseForbidden(
                content=str(exception).encode(encoding),
                content_type="text/plain; charset=%s" % encoding,
            )
        elif isinstance(exception, ExternalProcessError):
            # Catch problems interacting with processes that the
            # appserver spawns, e.g. rndc.
            #
            # While this is a serious error, it should be a temporary
            # one as the admin should be checking and fixing, or it
            # could be spurious.  There's no way of knowing, so the best
            # course of action is to ask the caller to repeat.
            if settings.DEBUG:
                self.log_exception(exception)
            response = HttpResponse(
                content=str(exception).encode(encoding),
                status=int(http.client.SERVICE_UNAVAILABLE),
                content_type="text/plain; charset=%s" % encoding,
            )
            response["Retry-After"] = RETRY_AFTER_SERVICE_UNAVAILABLE
            return response
        elif isinstance(exception, Http404):
            if settings.DEBUG:
                self.log_exception(exception)
            return get_exception_response(
                request, get_resolver(get_urlconf()), 404, exception
            )
        elif is_retryable_failure(exception):
            # We never handle retryable failures.
            return None
        elif isinstance(exception, SystemExit):
            return None
        else:
            # Print a traceback.
            self.log_exception(exception)
            # Return an API-readable "Internal Server Error" response.
            return HttpResponse(
                content=str(exception).encode(encoding),
                status=int(http.client.INTERNAL_SERVER_ERROR),
                content_type="text/plain; charset=%s" % encoding,
            )

    def log_exception(self, exception):
        exc_info = sys.exc_info()
        logger.error(" Exception: %s ".center(79, "#") % str(exception))
        logger.error("".join(traceback.format_exception(*exc_info)))


class DebuggingLoggerMiddleware:

    log_level = logging.DEBUG

    def __init__(self, get_response):
        self.get_response = get_response

    # Taken straight out of Django 1.8 django.http.request module to improve
    # our debug output on requests (dropped in Django 1.9).
    @classmethod
    def _build_request_repr(
        self,
        request,
        path_override=None,
        GET_override=None,
        POST_override=None,
        COOKIES_override=None,
        META_override=None,
    ):
        """
        Builds and returns the request's representation string. The request's
        attributes may be overridden by pre-processed values.
        """
        # Since this is called as part of error handling, we need to be very
        # robust against potentially malformed input.
        try:
            get = (
                pformat(GET_override)
                if GET_override is not None
                else pformat(request.GET)
            )
        except Exception:
            get = "<could not parse>"
        try:
            post = (
                pformat(POST_override)
                if POST_override is not None
                else pformat(request.POST)
            )
        except Exception:
            post = "<could not parse>"
        try:
            cookies = (
                pformat(COOKIES_override)
                if COOKIES_override is not None
                else pformat(request.COOKIES)
            )
        except Exception:
            cookies = "<could not parse>"
        try:
            meta = (
                pformat(META_override)
                if META_override is not None
                else pformat(request.META)
            )
        except Exception:
            meta = "<could not parse>"
        path = path_override if path_override is not None else request.path
        name = request.__class__.__name__
        return force_str(
            f"<{name}\npath:{path},\nGET:{get},\nPOST:{post},\nCOOKIES:{cookies},\nMETA:{meta}>"
        )

    def __call__(self, request):
        if settings.DEBUG_HTTP and logger.isEnabledFor(self.log_level):
            header = " Request dump ".center(79, "#")
            logger.log(
                self.log_level,
                "%s\n%s",
                header,
                self._build_request_repr(request),
            )
        response = self.get_response(request)
        if settings.DEBUG_HTTP and logger.isEnabledFor(self.log_level):
            header = " Response dump ".center(79, "#")
            content = getattr(response, "content", "{no content}")
            try:
                decoded_content = content.decode("utf-8")
            except UnicodeDecodeError:
                logger.log(
                    self.log_level,
                    "%s\n%s",
                    header,
                    "** non-utf-8 (binary?) content **",
                )
            else:
                logger.log(self.log_level, "%s\n%s", header, decoded_content)
        return response


class RPCErrorsMiddleware:
    """A middleware for handling RPC errors."""

    handled_exceptions = (
        NoConnectionsAvailable,
        PowerActionAlreadyInProgress,
        TimeoutError,
    )

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        try:
            return self.get_response(request)
        except Exception as exception:
            response = self.process_exception(request, exception)
            if response:
                return response
            else:
                raise

    def process_exception(self, request, exception):
        if request.path.startswith(settings.API_URL_PREFIX):
            # Not a path we're handling exceptions for.
            # APIRPCErrorsMiddleware handles all the API request RPC
            # errors.
            return None

        if not isinstance(exception, self.handled_exceptions):
            # Nothing to do, since we don't care about anything other
            # than handled_exceptions.
            return None

        logging.exception(exception)
        return HttpResponseRedirect(request.path)


class APIRPCErrorsMiddleware(RPCErrorsMiddleware):
    """A middleware for handling RPC errors in API requests."""

    handled_exceptions = {
        NoConnectionsAvailable: int(http.client.SERVICE_UNAVAILABLE),
        PowerActionAlreadyInProgress: int(http.client.SERVICE_UNAVAILABLE),
        TimeoutError: int(http.client.GATEWAY_TIMEOUT),
    }

    def process_exception(self, request, exception):
        if not request.path.startswith(settings.API_URL_PREFIX):
            # Not a path we're handling exceptions for.
            # RPCErrorsMiddleware handles non-API requests.
            return None

        if exception.__class__ not in self.handled_exceptions:
            # This isn't something we handle; allow processing to
            # continue.
            return None

        status = self.handled_exceptions[exception.__class__]
        logging.exception(exception)
        error_message = get_error_message_for_exception(exception)

        encoding = "utf-8"
        response = HttpResponse(
            content=error_message.encode(encoding),
            status=status,
            content_type="text/plain; charset=%s" % encoding,
        )
        if status == http.client.SERVICE_UNAVAILABLE:
            response["Retry-After"] = RETRY_AFTER_SERVICE_UNAVAILABLE
        return response


class CSRFHelperMiddleware:
    """A Middleware to decide whether a request needs to be protected
    against CSRF attacks.

    Requests with a session cookie (i.e. requests for which the basic
    session-based Django authentication is used) will be CSRF protected.
    Requests without this cookie are pure 0-legged API requests and thus
    don't need to use the CSRF protection machinery because each request
    is signed.
    """

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        session_cookie = request.COOKIES.get(
            settings.SESSION_COOKIE_NAME, None
        )
        if session_cookie is None:
            # csrf_processing_done is a field used by Django.  We use it here
            # to bypass the CSRF protection when it's not needed (i.e. when
            # the request is OAuth-authenticated).
            request.csrf_processing_done = True
        return self.get_response(request)


@attr.s
class ExternalAuthInfo:
    """Hold information about external authentication."""

    type = attr.ib()
    url = attr.ib()
    domain = attr.ib(default="")
    admin_group = attr.ib(default="")


class ExternalAuthInfoMiddleware:
    """A Middleware adding information about the external authentication.

    This adds an `external_auth_info` attribute to the request, which is an
    ExternalAuthInfo instance if external authentication is enabled, None
    otherwise.
    """

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        configs = Config.objects.get_configs(
            [
                "external_auth_url",
                "external_auth_domain",
                "external_auth_admin_group",
                "rbac_url",
            ]
        )
        rbac_endpoint = configs.get("rbac_url")
        candid_endpoint = configs.get("external_auth_url")
        auth_endpoint, auth_domain, auth_admin_group = "", "", ""
        if rbac_endpoint:
            auth_type = "rbac"
            auth_endpoint = rbac_endpoint.rstrip("/") + "/auth"
        elif candid_endpoint:
            auth_type = "candid"
            auth_endpoint = candid_endpoint
            auth_domain = configs.get("external_auth_domain")
            auth_admin_group = configs.get("external_auth_admin_group")

        auth_info = None
        if auth_endpoint:
            # strip trailing slashes as js-bakery ends up using double slashes
            # in the URL otherwise
            auth_info = ExternalAuthInfo(
                type=auth_type,
                url=auth_endpoint.rstrip("/"),
                domain=auth_domain,
                admin_group=auth_admin_group,
            )
        request.external_auth_info = auth_info
        return self.get_response(request)


class RBACMiddleware:
    """Middleware that cleans the RBAC thread-local cache.

    At the end of each request the RBAC client that is held in the
    thread-local needs to be cleaned up.  That way the next request on the
    same thread will use a new RBAC client.
    """

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        result = self.get_response(request)
        # Now that the response has been handled, clear the thread-local
        # state of the RBAC connection.
        rbac.clear()
        return result
agpl-3.0
-8,170,894,622,841,870,000
34.505769
96
0.611222
false
4.598506
true
false
false
ivanamihalek/tcga
icgc/60_nextgen_production/65_reactome_tree.py
1
5057
#! /usr/bin/python3
#
# This source code is part of icgc, an ICGC processing pipeline.
#
# Icgc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Icgc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Contact: [email protected]
#

# some pathways do not have the associated genes listed, probably by mistake
# examples:
# R-HSA-1483171 | Synthesis of BMP
# R-HSA-2408499 | Formation of selenosugars for excretion

from icgc_utils.common_queries import quotify
from icgc_utils.reactome import *
from config import Config

############
def print_genes(cursor, gene_ids, depth):
    if len(gene_ids) < 1:
        print("\t"*depth, "no genes listed")
        return
    #print("\t"*depth, "print genes here")
    gene_id_string = ",".join([quotify(z) for z in gene_ids])
    qry = "select ensembl_gene_id, approved_name from hgnc where ensembl_gene_id in (%s)" % gene_id_string
    gene_names = dict(hard_landing_search(cursor, qry))
    qry = "select ensembl_gene_id, approved_symbol from hgnc where ensembl_gene_id in (%s)" % gene_id_string
    gene_symbols = dict(hard_landing_search(cursor, qry))

    for gene in gene_ids:
        print("\t"*depth, gene_symbols.get(gene, ""), gene_names.get(gene, ""))
    return

##############
def characterize_subtree(cursor, graph, pthwy_id, gene_groups, depth, verbose=True):
    # this is the whole subtree
    # children = [node for node in nx.dfs_preorder_nodes(graph, pthwy_id)]
    # A successor of n is a node m such that there exists a directed edge from n to m.
    children = [node for node in graph.successors(pthwy_id)]
    if len(children) == 0: return False
    node_id_string = ",".join([quotify(z) for z in children])
    qry_template = "select * from reactome_pathways where reactome_pathway_id in (%s)"
    children_names = hard_landing_search(cursor, qry_template % node_id_string)
    for child_id, child_name in children_names:
        # number_of_genes = genes related to nodes without descendants
        genes = genes_in_subgraph(cursor, graph, child_id)
        if verbose: print("\t"*depth, child_id, child_name, len(genes))
        if len(genes) < 100:
            if verbose: print_genes(cursor, genes, depth+1)
            gene_groups[child_name] = genes
            continue
        if not characterize_subtree(cursor, graph, child_id, gene_groups, depth+1, verbose=verbose):
            # no further subdivisions
            if verbose: print_genes(cursor, genes, depth+1)
            gene_groups[child_name] = genes
            continue
    return True

#########################################
import numpy as np
from matplotlib import pyplot as plt

def hist_plot(gene_groups):
    data = [len(gene_list) for gene_list in list(gene_groups.values())]
    # fixed bin size
    bins = np.arange(0, 505, 5)  # fixed bin size
    plt.xlim(0, 500)
    plt.hist(data, bins=bins, alpha=0.5)
    # plt.title('')
    plt.xlabel('number of genes in group (bin size = 5)')
    plt.ylabel('number of groups')
    # plt.show()

####################################################
def main():
    verbose = False

    db = connect_to_mysql(Config.mysql_conf_file)
    cursor = db.cursor()

    switch_to_db(cursor, 'icgc')

    # are there children with multiple parents? Yes. So I need some kind of
    # directed graph, rather than a tree.
    qry = "select child, count(distinct parent) as ct from reactome_hierarchy "
    qry += "group by child having ct>1"
    ret = search_db(cursor, qry)
    print("number of children with multiple parents:", len(ret))

    # feed the parent/child pairs as edges into graph
    graph = build_reactome_graph(cursor, verbose=True)
    # candidate roots
    zero_in_degree_nodes = get_roots(graph)

    node_id_string = ",".join([quotify(z) for z in zero_in_degree_nodes])
    qry_template = "select * from reactome_pathways where reactome_pathway_id in (%s)"
    root_names = hard_landing_search(cursor, qry_template % node_id_string)

    gene_groups = {}
    for pthwy_id, name in root_names:
        if "disease" in name.lower(): continue
        if verbose: print(pthwy_id, name)
        characterize_subtree(cursor, graph, pthwy_id, gene_groups, 1, verbose=verbose)

    print("\n===========================")
    max_group = 0
    for group, genes in gene_groups.items():
        groupsize = len(genes)
        if max_group < groupsize: max_group = groupsize
        print(group, len(genes))
    print("\n===========================")
    print("number of groups", len(gene_groups))
    print("largest group", max_group)
    print("\n===========================")
    for pthwy_name, genes in gene_groups.items():
        if len(genes) <= 150: continue
        print("\n", pthwy_name, len(genes))
        #print_genes(cursor, genes, 1)

    #hist_plot(gene_groups)
    cursor.close()
    db.close()

#########################################
if __name__ == '__main__':
    main()
gpl-3.0
-438,034,926,294,668,700
35.121429
120
0.677477
false
3.053744
false
false
false
soccermetrics/marcotti-mls
marcottimls/models/financial.py
1
5658
from sqlalchemy import Column, Integer, String, Sequence, ForeignKey, ForeignKeyConstraint, Boolean
from sqlalchemy.orm import relationship, backref
from sqlalchemy.schema import CheckConstraint

import enums
from common import BaseSchema


class AcquisitionPaths(BaseSchema):
    """
    MLS player acquisition data model.

    Captures **initial** entry path into league.
    """
    __tablename__ = 'acquisitions'

    player_id = Column(Integer, ForeignKey('players.id'), primary_key=True)
    year_id = Column(Integer, ForeignKey('years.id'), primary_key=True)
    path = Column(enums.AcquisitionType.db_type())
    discriminator = Column('type', String(20))
    club_id = Column(Integer, ForeignKey('clubs.id'))

    club = relationship('Clubs', backref=backref('acquisitions'))
    player = relationship('Players', backref=backref('entry'))
    year = relationship('Years', backref=backref('acquisitions'))

    __mapper_args__ = {
        'polymorphic_identity': 'acquisitions',
        'polymorphic_on': discriminator
    }


class PlayerDrafts(AcquisitionPaths):
    """Player draft data model."""
    __mapper_args__ = {'polymorphic_identity': 'draft'}

    round = Column(Integer, CheckConstraint('round > 0'))
    selection = Column(Integer, CheckConstraint('selection > 0'))
    gen_adidas = Column(Boolean, default=False)

    def __repr__(self):
        return u"<PlayerDraft(name={0}, year={1}, round={2}, selection={3}, generation_adidas={4})>".format(
            self.player.full_name, self.year.yr, self.round, self.selection, self.gen_adidas).encode('utf-8')

    def __unicode__(self):
        return u"<PlayerDraft(name={0}, year={1}, round={2}, selection={3}, generation_adidas={4})>".format(
            self.player.full_name, self.year.yr, self.round, self.selection, self.gen_adidas)


class PlayerSalaries(BaseSchema):
    """Player salary data model."""
    __tablename__ = 'salaries'
    __table_args__ = (
        ForeignKeyConstraint(
            ['competition_id', 'season_id'],
            ['competition_seasons.competition_id', 'competition_seasons.season_id'],
        ),
    )

    id = Column(Integer, Sequence('salary_id_seq', start=10000), primary_key=True)
    base_salary = Column(Integer, CheckConstraint('base_salary >= 0'), doc="Base salary in cents")
    avg_guaranteed = Column(Integer, CheckConstraint('avg_guaranteed >= 0'),
                            doc="Average annualized guaranteed compensation in cents")

    player_id = Column(Integer, ForeignKey('players.id'))
    club_id = Column(Integer, ForeignKey('clubs.id'))
    competition_id = Column(Integer)
    season_id = Column(Integer)

    player = relationship('Players', backref=backref('salaries'))
    club = relationship('Clubs', backref=backref('payroll'))
    comp_season = relationship('CompetitionSeasons', backref=backref('payroll'))

    def __repr__(self):
        return u"<PlayerSalary(name={0}, club={1}, competition={2}, season={3}, base={4:.2f}, " \
               u"guaranteed={5:.2f})>".format(self.player.full_name, self.club.name,
                                              self.comp_season.competition.name, self.comp_season.season.name,
                                              self.base_salary/100.00, self.avg_guaranteed/100.00).encode('utf-8')

    def __unicode__(self):
        return u"<PlayerSalary(name={0}, club={1}, competition={2}, season={3}, base={4:.2f}, " \
               u"guaranteed={5:.2f})>".format(self.player.full_name, self.club.name,
                                              self.comp_season.competition.name, self.comp_season.season.name,
                                              self.base_salary / 100.00, self.avg_guaranteed / 100.00)


class PartialTenures(BaseSchema):
    """Data model that captures player's partial-season tenure at a club."""
    __tablename__ = 'partials'
    __table_args__ = (
        ForeignKeyConstraint(
            ['competition_id', 'season_id'],
            ['competition_seasons.competition_id', 'competition_seasons.season_id'],
        ),
    )

    id = Column(Integer, Sequence('partial_id_seq', start=10000), primary_key=True)
    start_week = Column(Integer, CheckConstraint('start_week > 0'))
    end_week = Column(Integer, CheckConstraint('end_week > 0'))

    player_id = Column(Integer, ForeignKey('players.id'))
    club_id = Column(Integer, ForeignKey('clubs.id'))
    competition_id = Column(Integer)
    season_id = Column(Integer)

    player = relationship('Players', backref=backref('partials'))
    club = relationship('Clubs', backref=backref('partials'))
    comp_season = relationship('CompetitionSeasons', backref=backref('partials'))

    def __repr__(self):
        return u"<PartialTenure(name={0}, club={1}, competition={2}, season={3}, " \
               u"start_week={4}, end_week={5})>".format(self.player.full_name, self.club.name,
                                                        self.comp_season.competition.name,
                                                        self.comp_season.season.name,
                                                        self.start_week, self.end_week).encode('utf-8')

    def __unicode__(self):
        return u"<PartialTenure(name={0}, club={1}, competition={2}, season={3}, " \
               u"start_week={4}, end_week={5})>".format(self.player.full_name, self.club.name,
                                                        self.comp_season.competition.name,
                                                        self.comp_season.season.name,
                                                        self.start_week, self.end_week)
mit
-645,660,621,350,676,700
42.19084
114
0.59597
false
3.792225
false
false
false
koepferl/FluxCompensator
fluxcompensator/cube.py
1
25142
from copy import deepcopy import os ROOT = os.path.dirname(os.path.abspath(__file__)) + '/' from astropy import log as logger from astropy.io import fits import numpy as np from numpy.random import normal from .psf import GaussianPSF, FilePSF, FunctionPSF from .filter import Filter from .utils.plot import MakePlots from .utils.resolution import ConservingZoom, central from .utils.tools import properties, grid_units, get_slices, average_collapse, central_wav from .utils.units import ConvertUnits # submitting PhD thesis today :) class SyntheticCube(object): ''' SyntheticCube is part the FluxCompensator. It converts input_arrays (e. g. HYPERION ModelOutput) to "realistic" synthetic observations (e.g. accounts for PSF, filters & noise). It contains attributes like ModelOutput (see Notes). If input_array is already a SyntheticCube object, the attributes are passed. If input_array is not a SyntheticCube object, SyntheticCube specific attributes are defined and then passed. Parameters ---------- input_array : SyntheticCube, ModelOutput, optional input_array also reads arrays with ModelOutput like properties. unit_out : str, optional The output units for SyntheticCube val. Valid options are: * ``'ergs/cm^2/s'`` * ``'ergs/cm^2/s/Hz'`` * ``'Jy'`` * ``'mJy'`` * ``'MJy/sr'`` The default is ``'ergs/cm^2/s'``. name : str The name of the FluxCompensator object until another input_array is called. The default is ``None``. Attributes ---------- wav : numpy.ndarray The wavelengths of val cube slices in microns. val : numpy.ndarray The 3D cube with shape (x, y, wav). units : str Current units of the val cube. distance : str Distance to the observed object in cm. x_min : float Physical offset from axis origin in FOV in cm. x_max : float Physical offset from axis origin in FOV in cm. y_min : float Physical offset from axis origin in FOV in cm. y_max : float Physical offset from axis origin in FOV in cm. lon_min : float Minimal longitudinal angle. lon_max : float Maximal longitudinal angle. lat_min : float Minimal latitudinal angle. lat_max : float Maximal latitudinal angle. pix_area_sr : float Pixel area per sr. Notes ----- unit_in : str Unit of val in input_array. Valid options are: * ``'ergs/cm^2/s'`` * ``'ergs/cm^2/s/Hz'`` * ``'Jy'`` * ``'mJy'`` * ``'MJy/sr'`` grid_unit : float Physical unit of FOV axis in cm. Valid options are: * ``au`` in cm * ``pc`` in cm * ``kpc`` in cm grid_unit_name Astronomical unit of FOV axis. Valid options are: * ``'au'`` * ``'pc'`` * ``'kpc'`` FOV : tuple Tuple ``FOV(x,y)`` of Field of View pixel entries. * pixel in x direction: ``FOV[0]`` * pixel in y direction: ``FOV[1]`` name : str The name of the FluxCompensator object until another input_array is called. The default is ``None``. stage : str Gives current operation stage of SyntheticCube. E. g. ``'SyntheticCube: convolve_filter'`` log : list List of strings of the previous and current stages. filter : dict Dictionary ``filter = {name, waf_0, waf_min, waf_max}`` of the applied filter. * name of filter: ``filter['name']`` * central wavelength: ``filter['waf_0']`` * minimal wavelength: ``filter['waf_min']`` * maximal wavelength: ``filter['waf_max']`` Returns ------- cube : SyntheticCube 3D val array with SyntheticCube properties. image : SyntheticImage 2D val array with SyntheticImage properties. sed : SyntheticSED 1D val array (collapsed rough SED) with SyntheticSED properties. flux : SyntheticFlux 0D val array (scalar) with SyntheticFlux properties. 
''' def __init__(self, input_array, unit_out='ergs/cm^2/s', name=None): # Hyperion ModelOutput attributes #if input_array.val.ndim == 3: self.val = np.array(deepcopy(input_array.val)) #else: # raise Exception('input_array does not have the right dimensions. numpy array of (x, y, wav) is required.') self.wav = np.array(deepcopy(input_array.wav)) self.units = input_array.units self.distance = input_array.distance self.x_max = input_array.x_max self.x_min = input_array.x_min self.y_max = input_array.y_max self.y_min = input_array.y_min self.lon_min = input_array.lon_min self.lon_max = input_array.lon_max self.lat_min = input_array.lat_min self.lat_max = input_array.lat_max self.pix_area_sr = input_array.pix_area_sr ################## # new attributes # ################## if isinstance(input_array, SyntheticCube): # attributes with are passed, since input_array is SyntheticCube # physical values self.unit_in = input_array.unit_in self.unit_out = input_array.unit_out self.grid_unit = input_array.grid_unit self.grid_unit_name = input_array.grid_unit_name # properties of cube self.FOV = deepcopy(input_array.FOV) # name self.name = input_array.name self.stage = input_array.stage self.log = deepcopy(input_array.log) # filter self.filter = deepcopy(input_array.filter) else: # attributes are defined, since input_array is NOT SyntheticCube # physical values self.unit_in = input_array.units self.unit_out = unit_out self.grid_unit = grid_units(self.x_max - self.x_min)['grid_unit'] self.grid_unit_name = grid_units(self.x_max - self.x_min)['grid_unit_name'] self.FOV = (self.x_max - self.x_min, self.y_max - self.y_min) # name self.name = name self.stage = 'SyntheticCube: initial' self.log = [self.stage] # filter self.filter = {'name': None, 'waf_0': None, 'waf_min': None, 'waf_max': None} # convert into val units into unit_out s = ConvertUnits(wav=self.wav, val=self.val) self.val = s.get_unit(in_units=self.unit_in, out_units=self.unit_out, input_resolution=self.resolution['arcsec']) self.units = self.unit_out def extinction(self, A_v, input_opacities=None): ''' Accounts for reddening. Parameters ---------- A_v : Value of the visible extinction. input_opacities : ``None``, str If ``None`` standard extinction law is used. Otherwise a e. g. input_opacities.txt file can be passed as a str to read an opacity file with column #1 wav in microns and column #2 in cm^2/g. Default is ``None``. Returns ------- cube : SyntheticCube ''' stage = 'SyntheticCube: extinction' # read own extinction law if input_opacities is None: t = np.loadtxt(ROOT + 'database/extinction/extinction_law.txt') else: t = np.loadtxt(input_opacities) wav_ext = t[:, 0] k_lam = t[:, 1] # wav_ext monotonically increasing if wav_ext[0] > wav_ext[1]: wav_ext = wav_ext[::-1] k_lam = k_lam[::-1] k_v = np.interp(0.550, wav_ext, k_lam) # interpolate to get A_int for a certain wavelength k = np.interp(self.wav, wav_ext, k_lam) A_int_lam = A_v * (k / k_v) # apply extinction law val_ext = np.zeros(shape=np.shape(self.val)) val_ext[:,:,:len(self.wav)] = self.val[:,:,:len(self.wav)] * 10**(-0.4 * A_int_lam[:len(self.wav)]) # return SimulateCube c = SyntheticCube(self) c.val = val_ext c.stage = stage c.log.append(c.stage) return c def change_resolution(self, new_resolution, grid_plot=None): ''' Changes the resolution of every slice of the val cube. Parameters ---------- new_resolution : Resolution which the val array should get in ``arcsec/pixel.`` grid_plot : ``None``, ``True`` If ``True`` old and new resolution is visualized in a plot. Default is ``None``. 
Returns ------- cube : SyntheticCube ''' stage = 'SyntheticCube: change_resolution' # debugging comment logger.debug('-' * 70) logger.debug(stage) logger.debug('-' * 70) logger.debug('total value before zoom : ' + str('%1.4e' % np.sum(self.val)) + ' ' + str(self.units)) # match resolution of psf and val slice f = ConservingZoom(array=self.val, initial_resolution=self.resolution['arcsec'], new_resolution=new_resolution) zoomed_val = f.zoom() # average after changing resolution for MJy/sr if self.units == 'MJy/sr' or self.units == 'Jy/arcsec^2': # size of new pixel in units of old pixel size = new_resolution ** 2 / self.resolution['arcsec'] ** 2 zoomed_val = zoomed_val / size if grid_plot is not None: f.zoom_grid(self.name) # debugging comment logger.debug('total value after zoom : ' + str('%1.4e' % np.sum(zoomed_val)) + ' ' + str(self.units)) # return SimulateCube c = SyntheticCube(self) c.val = zoomed_val c.stage = stage c.log.append(c.stage) c.FOV = (f.len_nx / f.len_nrx * self.FOV[0], f.len_ny / f.len_nry * self.FOV[1]) return c def central_pixel(self, dx, dy): ''' Move array right and up to create a central pixel. Returns ------- cube : SyntheticCube ''' stage = 'SyntheticCube: central_pixel' ce = central(array=self.val, dx=dx, dy=dy) len_x_old = float(self.pixel[0]) len_x_new = float(len(ce[:,0])) len_y_old = float(self.pixel[1]) len_y_new = float(len(ce[0,:])) old_FOV = self.FOV new_FOV = (len_x_new / len_x_old * old_FOV[0], len_y_new / len_y_old * old_FOV[1]) # return SimulateCube c = SyntheticCube(self) c.val = ce c.stage = stage c.log.append(c.stage) c.FOV = new_FOV return c def convolve_psf(self, psf): ''' Convolves every slice of the val cube with a PSF of choice. Parameters ---------- psf : GaussianPSF, FilePSF, database, FunctionPSF * GaussianPSF(self, diameter): Convolves val with Gaussian PSF. * FilePSF(self, psf_file, condensed): Reads PSF from input file. * database: PSF object defined in FluxCompensator database. * FunctionPSF(self, psf_function, width): Convolves val with calculated PSF. Returns ------- cube : SyntheticCube ''' stage = 'SyntheticCube: convolve_PSF' # debugging comments if isinstance(psf, GaussianPSF): logger.debug('-' * 70) logger.debug(stage + 'with GaussianPSF') logger.debug('-' * 70) # convolve value with classes GaussianPSF, FilePSF and FunctionPSF val = self.val.copy() for i in range(len(self.wav)): val[:, :, i] = psf.convolve(wav = self.wav[i], array = self.val[:,:, i], resolution = self.resolution) # return SimulateCube c = SyntheticCube(self) c.val = val c.stage = stage c.log.append(c.stage) return c def convolve_filter(self, filter_input, plot_rebin=None, plot_rebin_dpi=None): ''' Convolves slice within filter limits into a 2D image. Parameters ---------- filter_input : object * database : if filter ``name`` from FluxCompensator database is used. * Filter : if own filter is used. plot_rebin : ``True``, ``None`` Switch to plot the rebined filter and the original filter in one plot. plot_rebin_dpi : ``None``, scalar > 0 The resolution in dots per inch. ``None`` is default and will use the value savefig.dpi in the matplotlibrc file. 
Returns ------- image : SyntheticImage ''' stage = 'SyntheticCube: convolve_filter' # debugging comment logger.debug('-' * 70) logger.debug(stage) logger.debug('-' * 70) weight = filter_input.rebin(self.wav, self.val) # returns weight{'wav_short' 'val_short' 'Response_new' 'filter_index' 'wavf_0' 'waf_min' 'waf_max' 'filter_name'} wav_short = weight['wav_short'] val_short = weight['val_short'] filter_index = weight['filter_index'] Response_new = weight['Response_new'] waf_0 = weight['waf_0'] waf_min = weight['waf_min'] waf_max = weight['waf_max'] filter_name = weight['filter_name'] if plot_rebin is not None: plot = filter_input.plot(val_name=self.name, dpi=plot_rebin_dpi) # weight val_short with rebined response val = val_short.copy() val[:, :, :len(wav_short)] = val_short[:,:, :len(wav_short)] * Response_new[:len(wav_short)] # collapse remaining cube into 2D val_2D = np.sum(val, axis=2) # return SyntheticImage from .image import SyntheticImage i = SyntheticImage(self) i.log.append(stage) i.stage = 'SyntheticImage: initial' i.log.append(i.stage) i.filter = {'name': filter_name, 'waf_0': waf_0, 'waf_min': waf_min, 'waf_max': waf_max} i.val = val_2D i.wav = np.array(waf_0) return i def add_noise(self, mu_noise, sigma_noise, seed=None, diagnostics=None): ''' Adds normal distributed noise to every slice in the val cube of SyntheticCube. Parameters ---------- mu_noise : float Mean of the normal distribution. Good choice: mu_noise = 0. sigma_noise : float Standard deviation of the normal distribution. Good choice around: * ``'ergs/cm^2/s'`` : sigma_noise = 10.**(-13) * ``'ergs/cm^2/s/Hz'`` : sigma_noise = 10.**(-26) * ``'Jy'`` : sigma_noise = 10.**(-3) * ``'mJy'`` : sigma_noise = 10.**(-1) * ``'MJy/sr'`` : sigma_noise = 10.**(-10) seed : float, ``None`` When float seed fixes the random numbers to a certain sequence in order to create reproducible results. Default is ``None``. diagnostics : truetype When ``True`` noise array is stored in a fits file. Returns ------- cube : SyntheticCube ''' stage = 'SyntheticCube: add_noise' # add different noise with same mu and sigma to 3D cube val = self.val.copy() for i in range(len(self.wav)): if sigma_noise != 0. and sigma_noise != 0: if seed is not None: np.random.seed(seed=seed) noise = normal(mu_noise, sigma_noise, self.pixel) if sigma_noise == 0. or sigma_noise == 0: noise = np.zeros(self.pixel) val[:, :, i] = self.val[:,:, i] + noise if diagnostics is True: fits.writeto(self.name + '_process-output_SC-noise.fits', noise, clobber=True) # return SyntheticCube c = SyntheticCube(self) c.val = val c.stage = stage c.log.append(c.stage) return c def get_rough_sed(self): ''' Collapses the current val cube into 1D array (SED). 
Returns ------- sed : SyntheticSED ''' stage = 'SyntheticCube: get_rough_sed' # for MJy/sr convert first, add and then convert back if self.unit_out == 'MJy/sr' or self.unit_out == 'Jy/arcsec^2': s = ConvertUnits(wav=self.wav, val=self.val) self.val = s.get_unit(in_units=self.units, out_units='Jy', input_resolution=self.resolution['arcsec']) # collapse every slice to one scalar value rough_sed = np.sum(np.sum(self.val.copy(), axis=1), axis=0) if self.unit_out == 'MJy/sr' or self.unit_out == 'Jy/arcsec^2': s = ConvertUnits(wav=self.wav, val=rough_sed) rough_sed = s.get_unit(in_units='Jy', out_units=self.unit_out, input_resolution=self.resolution['arcsec'] * self.pixel[0]) # return SyntheticSED from .sed import SyntheticSED s = SyntheticSED(self) s.log.append(stage) s.stage = 'SyntheticSED: initial' s.log.append(s.stage) s.val = rough_sed return s def get_total_val(self, wav_1, wav_2): ''' Collapses the val of SyntheticCube within the boundaries wav_1 and wav_2 into a 0D value val. WARNING: This tool cannot replace convolve_filter! But it can be used to produce rough estimates in-between the processes. Parameters ---------- wav_1, wav_2 : float Boundaries in microns. Returns ------- val : SyntheticFlux ''' stage = 'SyntheticCube: get_total_val' # slices within boundaries are extracted, averaged collapsed to an 2D image and finally collpased to a single scalar value # for MJy/sr convert first, add and then convert back if self.unit_out == 'MJy/sr' or self.unit_out == 'Jy/arcsec^2': s = ConvertUnits(wav=self.wav, val=self.val) val = s.get_unit(in_units=self.units, out_units='Jy', input_resolution=self.resolution['arcsec']) else: val = self.val c = get_slices(wav=self.wav, val=val, wav_1=wav_1, wav_2=wav_2) i = average_collapse(val=c['val_short']) f_total = np.sum(i) # real limits within collaps wav_max = 10 ** (np.log10(self.wav[c['filter_index'][0]]) + self.spacing_wav / 2.) wav_min = 10 ** (np.log10(self.wav[c['filter_index'][-1]]) - self.spacing_wav / 2.) wav_total = central_wav(wav=[wav_min, wav_max]) if self.unit_out == 'MJy/sr' or self.unit_out == 'Jy/arcsec^2': s = ConvertUnits(wav=wav_total, val=f_total) f_total = s.get_unit(in_units='Jy', out_units=self.unit_out, input_resolution=self.resolution['arcsec'] * self.pixel[0]) # return SyntheticFlux from .flux import SyntheticFlux f = SyntheticFlux(self) f.log.append(stage) f.stage = 'SyntheticFlux: initial' f.log.append(f.stage) f.wav = np.array(wav_total) f.val = np.array(f_total) f.filter = {'name': 'val_tot', 'waf_0': wav_total, 'waf_min': wav_min, 'waf_max': wav_max} return f def plot_image(self, wav_interest, prefix=None, name=None, multi_cut=None, single_cut=None, set_cut=None, dpi=None): ''' Plots a certain slice close the wav_interest. The wavelength interval of the chosen slice labels the plot. Parameters ---------- wav_interest : float, ``None`` * float : wavelength close to slice in microns. * ``None`` : Only if input_array is SyntheticImage like prefix : str Name of the image. Default naming chain is switched off. name : str Name of image within the default naming chain to distinguish the plot files. E. g. 'PSF_gaussian' mulit_cut : ``True``, ``None`` * ``True`` : plots chosen image slice at cuts of [100, 99, 95, 90]%. * ``None`` : no mulit-plot is returned. Default is ``None``. single_cut : float, ``None`` * float : cut level for single plot of image slice between 0 and 100. * ``None`` : no single plot is returned. 
        set_cut : tuple, ``None``

            * tuple : set_cut(v_min, v_max) - minimal and maximal physical
              value of val in the colorbars.
            * ``None`` : no plot with minimal and maximal cut is returned.

            Default is ``None``.

        dpi : ``None``, scalar > 0
            The resolution in dots per inch. ``None`` is the default and
            will use the value savefig.dpi in the matplotlibrc file.

        Returns
        -------
        cube : SyntheticCube
        '''

        stage = 'SyntheticCube: plot_image'

        if prefix is None and name is None:
            raise Exception('If prefix is not given, you need to give a '
                            'name to enable the default naming chain.')

        if prefix is not None:
            if multi_cut is True and (single_cut is not None or set_cut is not None):
                raise Exception('If prefix naming is enabled, only one '
                                'plotting option can be chosen.')
            elif multi_cut is None and (single_cut is not None and set_cut is not None):
                raise Exception('If prefix naming is enabled, only one '
                                'plotting option can be chosen.')

        plot = MakePlots(prefix=prefix, name=name,
                         input_array=SyntheticCube(self),
                         wav_interest=wav_interest, multi_cut=multi_cut,
                         single_cut=single_cut, set_cut=set_cut, dpi=dpi)

        # return SyntheticCube
        c = SyntheticCube(self)
        c.stage = stage
        c.log.append(c.stage)

        return c

    @property
    def spacing_wav(self):
        '''
        The property spacing_wav estimates the width of the
        logarithmically spaced wav entries.
        '''

        if self.wav.ndim != 0:
            spacing_wav = np.log10(self.wav[0] / self.wav[-1]) / (len(self.wav) - 1)
        else:
            spacing_wav = None
        return spacing_wav

    @property
    def pixel(self):
        '''
        The property pixel is a tuple which gives the pixel dimensions of
        the current value val. ``pixel = (x, y)`` is accessed as follows:

            ``x = pixel[0]``
            ``y = pixel[1]``
        '''

        if self.val.ndim in (0, 1):
            pixel = (None, None)
        if self.val.ndim in (2, 3):
            pixel = (self.val.shape[0], self.val.shape[1])
        return pixel

    @property
    def shape(self):
        '''
        The property shape is a string which describes the current shape
        of the value val.

            scalar : ``'()'``
            1D     : ``'(wav)'``
            2D     : ``'(x, y)'``
            3D     : ``'(x, y, wav)'``
        '''

        if self.val.ndim == 0:
            shape = '()'
        if self.val.ndim == 1:
            shape = '(wav)'
        if self.val.ndim == 2:
            shape = '(x, y)'
        if self.val.ndim == 3:
            shape = '(x, y, wav)'
        return shape

    @property
    def resolution(self):
        '''
        The property resolution tells you the current resolution. If we
        are already in the SED or flux, everything is considered as one
        large pixel.

            resolution in arcsec per pixel : ``resolution['arcsec']``
            resolution in rad per pixel    : ``resolution['rad']``
        '''

        resolution = {}
        if self.pixel[0] is None:
            resolution['rad'] = self.FOV[0] / 1. / self.distance
        else:
            resolution['rad'] = self.FOV[0] / self.pixel[0] / self.distance
        resolution['arcsec'] = np.degrees(resolution['rad']) * 3600
        return resolution
bsd-2-clause
-1,980,972,553,126,057,000
31.483204
180
0.532655
false
3.870978
false
false
false
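The convolve_filter and add_noise code in the record above reduces to two numpy operations: broadcast-multiply the cube by the rebinned response, then sum over the spectral axis. Below is a minimal, self-contained sketch of that step; the array shapes, the Hanning stand-in response, and the noise level are illustrative assumptions, not values from the package.

import numpy as np

nx, ny, nwav = 4, 4, 16
cube = np.random.rand(nx, ny, nwav)   # stand-in for SyntheticCube.val
response = np.hanning(nwav)           # stand-in for the rebinned filter response

weighted = cube * response            # broadcasting weights every pixel's spectrum
image_2d = weighted.sum(axis=2)       # collapse the spectral axis, like val_2D

rng = np.random.default_rng(42)       # a fixed seed gives reproducible noise, as in add_noise
noisy = cube + rng.normal(0.0, 1e-3, size=cube.shape)
assert image_2d.shape == (nx, ny)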
manimaul/MX-Cart
buildLin.py
1
4497
import os, hashlib
from shutil import copy2 as copy, rmtree
from subprocess import Popen
from Resources import versionNum

def md5sum(fd, block_size=2**20):
    # hash the file in fixed-size blocks to keep memory use flat
    md5 = hashlib.md5()
    while True:
        data = fd.read(block_size)
        if not data:
            break
        md5.update(data)
    return md5.hexdigest()

# clean any previous build
mPath = os.path.dirname(__file__) + "/build/debpkg/"
if os.path.isdir(mPath):
    rmtree(mPath)

# create DEBIAN directory
mPath = os.path.dirname(__file__) + "/build/debpkg/DEBIAN"
if not os.path.isdir(mPath):
    os.makedirs(mPath)

# write control file
control = open(mPath + "/control", "w")
control.write(
    "Package: MXCart\n" + \
    "Version: %s\n" % (versionNum) + \
    "Section: misc\n" + \
    "Priority: optional\n" + \
    "Architecture: all\n" + \
    "Depends: pngnq, python, python-wxgtk2.8, python-imaging, python-gdal, python-pyproj, python-simplejson, python-shapely\n" + \
    "Installed-Size: 331\n" + \
    "Maintainer: Will Kamp\n" + \
    "Description: BSB version 2 and 3 chart import utility for MX Mariner\n")
control.close()

# copy over needed python files
mPath = os.path.dirname(__file__) + "/build/debpkg/usr/local/lib/mxcart/"
if not os.path.isdir(mPath):
    os.makedirs(mPath)
for pyFile in ["/BsbHeader.py", "/buildWin.py", "/GUI.py", "/MXCart.py", "/BsbScales.py", \
               "/BsbOutlines.py", "/FilePathSearch.py", "/Helper_Gdal.py", "/MyGemfBuilder.py", \
               "/Helper_Tiler.py", "/Helper_Merge.py", "/Resources.py", "/FindZoom.py", "/GenerateData.py", \
               "/reader_bsb_data.csv", "/my_tilers_tools/viewer-google.html", "/my_tilers_tools/viewer-openlayers.html"]:
    copy(os.path.dirname(__file__) + pyFile, mPath)

mPath = os.path.dirname(__file__) + "/build/debpkg/usr/local/lib/mxcart/my_tilers_tools/"
if not os.path.isdir(mPath):
    os.makedirs(mPath)
for pyFile in ["/my_tilers_tools/gdal_tiler.py", \
               "/my_tilers_tools/generate_efficient_map_file.py", \
               "/my_tilers_tools/map2gdal.py", \
               "/my_tilers_tools/reader_backend.py", \
               "/my_tilers_tools/reader_bsb.py", \
               "/my_tilers_tools/tiler_functions.py", \
               "/my_tilers_tools/tiles_convert.py", \
               "/my_tilers_tools/tiles_merge_simple.py"]:
    copy(os.path.dirname(__file__) + pyFile, mPath)

# copy dependent images
mPath = os.path.dirname(__file__) + "/build/debpkg/usr/local/share/mxcart/"
if not os.path.isdir(mPath):
    os.makedirs(mPath)
for pyFile in ["/kattegat.png", "/spinner.gif"]:
    copy(os.path.dirname(__file__) + pyFile, mPath)

mPath = os.path.dirname(__file__) + "/build/debpkg/usr/local/share/icons/hicolor/48x48/apps/"
if not os.path.isdir(mPath):
    os.makedirs(mPath)
copy(os.path.dirname(__file__) + "/mxcart.png", mPath)

# create bin
mPath = os.path.dirname(__file__) + "/build/debpkg/usr/local/bin"
if not os.path.isdir(mPath):
    os.makedirs(mPath)
binsh = open(mPath + "/mxcart", "w")
binsh.write("#!/bin/bash\n\n" + \
            "cd /usr/local/lib/mxcart\n" + \
            "python MXCart.py\n")
binsh.close()
Popen(["chmod", "777", mPath + "/mxcart"])

# create desktop entry
mPath = os.path.dirname(__file__) + "/build/debpkg/usr/local/share/applications"
if not os.path.isdir(mPath):
    os.makedirs(mPath)
desktop = open(mPath + "/mxcart.desktop", "w")
desktop.write("[Desktop Entry]\n" + \
              "Version=%s\n" % (versionNum) + \
              "Name=MX Cart\n" + \
              "Comment=BSB Chart Import Utility\n" + \
              "Path=/usr/local/lib/mxcart/\n" + \
              "Exec=mxcart\n" + \
              "Icon=/usr/local/share/icons/hicolor/48x48/apps/mxcart.png\n" + \
              "StartupNotify=true\n" + \
              "Terminal=false\n" + \
              "Type=Application\n" + \
"Categories=Education;Science;Geography;" ) desktop.close() Popen(["dpkg-deb", "-b", os.path.dirname(__file__)+"/build/debpkg", os.path.dirname(__file__)+"/build/MXCart_%s_.deb" %(versionNum)]) ##write md5sum file #mPath = os.path.dirname(__file__)+"/build/debpkg/DEBIAN" #md5sums = open(mPath+"/md5sums", "w") #for ea in os.listdir(os.path.dirname(__file__)+"/build/debpkg/usr/local/lib/mxcart/"): # fd = open( os.path.dirname(__file__)+"/build/debpkg/usr/local/lib/mxcart/"+ea, "rb" ) # md5sums.write(md5sum(fd) + " " + "/usr/local/lib/mxcart/"+ea+"\n") # fd.close() ##for fd in os #md5sums.close()
bsd-2-clause
551,775,681,851,644,600
38.104348
133
0.632866
false
2.78452
false
false
false
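The md5sum helper in buildLin.py (used by the commented-out md5sums block) is the standard block-wise hashing pattern. A self-contained, runnable version for reference; the temp-file demo is illustrative only:

import hashlib

def md5sum_path(path, block_size=2 ** 20):
    md5 = hashlib.md5()
    with open(path, "rb") as fd:
        while True:
            data = fd.read(block_size)
            if not data:
                break
            md5.update(data)
    return md5.hexdigest()

if __name__ == "__main__":
    import os, tempfile
    tmp = tempfile.NamedTemporaryFile(delete=False)
    tmp.write(b"hello world")
    tmp.close()
    print(md5sum_path(tmp.name))  # 5eb63bbbe01eeed093cb22bb8f5acdc3
    os.unlink(tmp.name)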
jumoconnect/openjumo
jumodjango/cust_admin/templatetags/ext_admin_list.py
1
5314
import datetime
from django.conf import settings
from django.contrib.admin.util import lookup_field, display_for_field, label_for_field
from django.contrib.admin.views.main import ALL_VAR, EMPTY_CHANGELIST_VALUE
from django.contrib.admin.views.main import ORDER_VAR, ORDER_TYPE_VAR, PAGE_VAR, SEARCH_VAR
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.forms.forms import pretty_name
from django.utils import formats
from django.template.defaultfilters import escapejs
from django.utils.html import escape, conditional_escape
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from django.utils.encoding import smart_unicode, force_unicode
from django.template import Library
from django.contrib.admin.templatetags.admin_list import (_boolean_icon,
    result_headers, result_hidden_fields)

register = Library()

"""
All this was copied and pasted from django.contrib.admin so that
custDismissRelatedLookupPopup could be inserted below. Do a find.
"""

def ext_items_for_result(cl, result, form):
    """
    Generates the actual list of data.
    """
    first = True
    pk = cl.lookup_opts.pk.attname
    for field_name in cl.list_display:
        row_class = ''
        try:
            f, attr, value = lookup_field(field_name, result, cl.model_admin)
        except (AttributeError, ObjectDoesNotExist):
            result_repr = EMPTY_CHANGELIST_VALUE
        else:
            if f is None:
                allow_tags = getattr(attr, 'allow_tags', False)
                boolean = getattr(attr, 'boolean', False)
                if boolean:
                    allow_tags = True
                    result_repr = _boolean_icon(value)
                else:
                    result_repr = smart_unicode(value)
                # Strip HTML tags in the resulting text, except if the
                # function has an "allow_tags" attribute set to True.
                if not allow_tags:
                    result_repr = escape(result_repr)
                else:
                    result_repr = mark_safe(result_repr)
            else:
                if value is None:
                    result_repr = EMPTY_CHANGELIST_VALUE
                if isinstance(f.rel, models.ManyToOneRel):
                    result_repr = escape(getattr(result, f.name))
                else:
                    result_repr = display_for_field(value, f)
                if isinstance(f, models.DateField) or isinstance(f, models.TimeField):
                    row_class = ' class="nowrap"'
        if force_unicode(result_repr) == '':
            result_repr = mark_safe('&nbsp;')
        # If list_display_links not defined, add the link tag to the first field
        if (first and not cl.list_display_links) or field_name in cl.list_display_links:
            table_tag = {True: 'th', False: 'td'}[first]
            first = False
            url = cl.url_for_result(result)
            # Convert the pk to something that can be used in Javascript.
            # Problem cases are long ints (23L) and non-ASCII strings.
            if cl.to_field:
                attr = str(cl.to_field)
            else:
                attr = pk
            value = result.serializable_value(attr)
            result_id = repr(force_unicode(value))[1:]
            # All this was copied and pasted so the custDismissRelatedLookupPopup
            # could be inserted here.
ext_attrib = "" if cl.is_popup: if cl.is_ext_popup: ext_attrib = 'onclick="opener.custDismissRelatedLookupPopup(window, %s, \'%s\'); return false;"' % (result_id, escapejs(result_repr)) else: ext_attrib = ' onclick="opener.dismissRelatedLookupPopup(window, %s); return false;"' % result_id yield mark_safe(u'<%s%s><a href="%s" %s>%s</a></%s>' % \ (table_tag, row_class, url, ext_attrib, result_repr, table_tag)) else: # By default the fields come from ModelAdmin.list_editable, but if we pull # the fields out of the form instead of list_editable custom admins # can provide fields on a per request basis if form and field_name in form.fields: bf = form[field_name] result_repr = mark_safe(force_unicode(bf.errors) + force_unicode(bf)) else: result_repr = conditional_escape(result_repr) yield mark_safe(u'<td%s>%s</td>' % (row_class, result_repr)) if form and not form[cl.model._meta.pk.name].is_hidden: yield mark_safe(u'<td>%s</td>' % force_unicode(form[cl.model._meta.pk.name])) def ext_results(cl): if cl.formset: for res, form in zip(cl.result_list, cl.formset.forms): yield list(ext_items_for_result(cl, res, form)) else: for res in cl.result_list: yield list(ext_items_for_result(cl, res, None)) def ext_result_list(cl): """ Displays the headers and data list together """ return {'cl': cl, 'result_hidden_fields': list(result_hidden_fields(cl)), 'result_headers': list(result_headers(cl)), 'results': list(ext_results(cl))} ext_result_list = register.inclusion_tag("admin/change_list_results.html")(ext_result_list)
mit
-7,825,369,702,562,516,000
42.917355
153
0.605947
false
3.968633
false
false
false
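Stripped of Django, ext_results is just a pairing generator: rows are rendered with their formset form when one exists, and alone otherwise. A framework-free sketch of that control flow; the names here are illustrative, not part of the original module:

def iter_rows(result_list, formset=None):
    # pair each result with its form when a formset is present
    if formset is not None:
        for result, form in zip(result_list, formset):
            yield (result, form)
    else:
        for result in result_list:
            yield (result, None)

rows = list(iter_rows(["obj1", "obj2"], formset=["form1", "form2"]))
assert rows == [("obj1", "form1"), ("obj2", "form2")]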
vhernandez/jwsProcessor
src/jwsprocessor/fc_calculator.py
1
7488
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
To convert from mdeg to molar ellipticity (deg*cm2*dmol-1):

    m.e. = mdeg / (10*l*(C/MW)*Rn)

where
    l  = light path in cm
    C  = concentration in mg/ml
    MW = molecular weight
    Rn = number of residues of the protein
"""
import pygtk
pygtk.require("2.0")
import gtk
import gobject
from tools import _

CU_WEIGHT_VOL = 0
CU_MICROMOLAR = 1
CU_MILIMOLAR = 2
CONC_UNITS_LIST = [CU_WEIGHT_VOL, CU_MICROMOLAR, CU_MILIMOLAR]

class ProteinInfo:
    def __init__(self, name, molecular_weight, residue_number,
                 def_lp=0.1, def_c=0.1, def_c_units=CU_WEIGHT_VOL):
        self.name = name
        self.molecular_weight = molecular_weight
        self.residue_number = residue_number
        self.default_light_path = def_lp
        self.default_concentration = def_c
        if def_c_units in CONC_UNITS_LIST:
            self.default_conc_units = def_c_units
        else:
            self.default_conc_units = CU_WEIGHT_VOL

    def get_c_units(self):
        if not self.default_conc_units in CONC_UNITS_LIST:
            self.default_conc_units = CU_WEIGHT_VOL
        return self.default_conc_units

class CDCorrectionFactorCalculator(gtk.Dialog):
    c_text = _("Concentration (%s):")

    def __init__(self, initial_params=ProteinInfo("C-LytA", 15840, 136),
                 parent=None):
        gtk.Dialog.__init__(self,
                            title=_("Calculate correction factor"),
                            parent=parent,
                            flags=gtk.DIALOG_MODAL,
                            buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT,
                                     gtk.STOCK_OK, gtk.RESPONSE_ACCEPT))
        self._create_widgets()
        self._configure_widgets(initial_params)
        self.protein_info = initial_params
        self.correction_factor = 0

    def _calculate_fc(self):
        cu = self.c_units_combo.get_active()
        if cu == CU_MICROMOLAR:  # uM
            C = self.C_spinner.get_value() / 1000000.0
        elif cu == CU_MILIMOLAR:  # mM
            C = self.C_spinner.get_value() / 1000.0
        else:  # mg/ml
            MW = self.MW_spinner.get_value()
            if MW != 0.0:
                C = self.C_spinner.get_value() / MW
            else:
                C = 0.0
        LP = self.LP_spinner.get_value()
        Rn = self.Rn_spinner.get_value()
        FC_0 = 10 * LP * C * Rn
        if FC_0 != 0:
            self.correction_factor = 1.0 / FC_0
        else:
            self.correction_factor = 0.0
        return self.correction_factor

    def _c_units_changed_cb(self, widget):
        cu = self.c_units_combo.get_active()
        if cu == CU_MICROMOLAR:
            text = self.c_text % "uM"
            self.C_spinner.set_increments(0.1, 1.0)
        elif cu == CU_MILIMOLAR:
            text = self.c_text % "mM"
            self.C_spinner.set_increments(0.01, 0.1)
        else:
            text = self.c_text % "mg/ml"
            self.C_spinner.set_increments(0.01, 0.1)
        self.C_label.set_text(text)
        self._update_factor_cb(widget)

    def _copy_to_clipboard_cb(self, widget):
        clipboard = gtk.Clipboard()
        clipboard.set_text("%f" % self._calculate_fc())

    def _update_factor_cb(self, widget):
        self.factor_entry.set_text("%f" % self._calculate_fc())

    def _configure_widgets(self, protein_info):
        self.LP_spinner.set_value(protein_info.default_light_path)
        self.C_spinner.set_value(protein_info.default_concentration)
        self.c_units_combo.set_active(protein_info.get_c_units())
        self._c_units_changed_cb(self.c_units_combo)
        self.MW_spinner.set_value(protein_info.molecular_weight)
        self.Rn_spinner.set_value(protein_info.residue_number)
        self._update_factor_cb(self)
        self.c_units_combo.connect("changed", self._c_units_changed_cb)
        self.LP_spinner.connect("value-changed", self._update_factor_cb)
        self.C_spinner.connect("value-changed", self._update_factor_cb)
        self.MW_spinner.connect("value-changed", self._update_factor_cb)
        self.Rn_spinner.connect("value-changed", self._update_factor_cb)

    def _create_widgets(self):
        def create_label(label):
            l = gtk.Label(label)
            l.set_alignment(0, 0.5)
            l.set_use_markup(True)
            return l
        self.LP_spinner = gtk.SpinButton()
        self.LP_spinner.set_range(0.0, 10.0)
        self.LP_spinner.set_digits(2)
        self.LP_spinner.set_increments(0.01, 0.1)
        self.C_label = create_label(_("Concentration (mg/ml):"))
        self.C_spinner = gtk.SpinButton()
        self.C_spinner.set_range(0.0, 50.0)
        self.C_spinner.set_digits(4)
        self.C_spinner.set_increments(0.01, 0.1)
        self.MW_spinner = gtk.SpinButton()
        self.MW_spinner.set_range(1.0, 1000000000000.0)
        self.MW_spinner.set_digits(2)
        self.MW_spinner.set_increments(10.0, 100.0)
        self.Rn_spinner = gtk.SpinButton()
        self.Rn_spinner.set_range(1.0, 1000000000000.0)
        self.Rn_spinner.set_digits(0)
        self.Rn_spinner.set_increments(1.0, 10.0)
        self.factor_entry = gtk.Entry()
        self.factor_entry.props.editable = False
        self.factor_entry.set_text("%f" % 0.0)
        self.c_units_list = gtk.ListStore(str)
        self.c_units_list.append(["m:v (mg/ml)"])
        self.c_units_list.append(["micromolar"])
        self.c_units_list.append(["milimolar"])
        cell = gtk.CellRendererText()
        self.c_units_combo = gtk.ComboBox(self.c_units_list)
        self.c_units_combo.pack_start(cell, True)
        self.c_units_combo.add_attribute(cell, 'text', 0)
        self.c_units_combo.set_active(0)
        self.copy_to_clipboard_btn = gtk.Button(stock=gtk.STOCK_COPY)
        self.copy_to_clipboard_btn.connect("clicked", self._copy_to_clipboard_cb)

        table = gtk.Table(6, 2)
        table.set_row_spacings(3)
        table.set_col_spacings(3)
        table.attach(create_label(_("Light path (cm):")), 0, 1, 0, 1,
                     gtk.FILL, gtk.EXPAND | gtk.FILL)
        table.attach(self.LP_spinner, 1, 2, 0, 1,
                     gtk.EXPAND | gtk.FILL, gtk.EXPAND | gtk.FILL)
        table.attach(self.c_units_combo, 0, 2, 1, 2,
                     gtk.EXPAND | gtk.FILL, gtk.EXPAND | gtk.FILL)
        table.attach(self.C_label, 0, 1, 2, 3, gtk.FILL, gtk.EXPAND | gtk.FILL)
        table.attach(self.C_spinner, 1, 2, 2, 3,
                     gtk.EXPAND | gtk.FILL, gtk.EXPAND | gtk.FILL)
        table.attach(create_label(_("Molecular weight (g/mol):")), 0, 1, 3, 4,
                     gtk.FILL, gtk.EXPAND | gtk.FILL)
        table.attach(self.MW_spinner, 1, 2, 3, 4,
                     gtk.EXPAND | gtk.FILL, gtk.EXPAND | gtk.FILL)
        table.attach(create_label(_("Residue number:")), 0, 1, 4, 5,
                     gtk.FILL, gtk.EXPAND | gtk.FILL)
        table.attach(self.Rn_spinner, 1, 2, 4, 5,
                     gtk.EXPAND | gtk.FILL, gtk.EXPAND | gtk.FILL)
        table.attach(create_label(_("<b>Correction factor:</b>")), 0, 1, 5, 6,
                     gtk.FILL, gtk.EXPAND | gtk.FILL, 0, 5)
        table.attach(self.factor_entry, 1, 2, 5, 6,
                     gtk.EXPAND | gtk.FILL, gtk.EXPAND | gtk.FILL, 0, 5)

        self.vbox.pack_start(table, False, False, 4)
        self.action_area.pack_end(self.copy_to_clipboard_btn, False, False, 0)
        self.set_border_width(2)
        self.show_all()

if __name__ == "__main__":
    w = CDCorrectionFactorCalculator()
    w.run()
gpl-2.0
4,273,316,415,626,022,400
38.619048
89
0.581197
false
3.018138
false
false
false
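The dialog's _calculate_fc method implements the docstring formula m.e. = mdeg / (10*l*(C/MW)*Rn). A GUI-free sketch of the weight/volume case, checked against the C-LytA defaults above (light path 0.1 cm, 0.1 mg/ml, MW 15840, 136 residues):

def correction_factor(light_path_cm, conc_mg_ml, mol_weight, n_residues):
    c_molar = conc_mg_ml / mol_weight      # (mg/ml) / (g/mol) -> mol/l
    fc_0 = 10.0 * light_path_cm * c_molar * n_residues
    return 1.0 / fc_0 if fc_0 != 0 else 0.0

print(correction_factor(0.1, 0.1, 15840.0, 136))  # ~1164.71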
cropleyb/pentai
pentai/db/ai_factory.py
1
2712
#!/usr/bin/env python

import pentai.ai.ai_player as aip_m
from pentai.ai.priority_filter import *
from pentai.ai.priority_filter_2 import *
import pentai.db.openings_book as ob_m
import pentai.db.games_mgr

from pentai.ai.ai_genome import *

class AIFactory:
    # TODO: These are just functions
    def create_player(self, genome):
        filter_num = genome.filter_num
        if filter_num == 1:
            sf = PriorityFilter()
        elif filter_num == 2:
            sf = PriorityFilter2()
        sf.set_max_moves_per_depth_level(mmpdl=genome.mmpdl,
                                         narrowing=genome.narrowing,
                                         chokes=genome.chokes)
        try:
            vision = genome.vision
        except AttributeError:
            vision = 100
        sf.set_vision(vision)

        try:
            p_name = genome.p_name
        except AttributeError:
            p_name = genome.name
        p = aip_m.AIPlayer(sf, p_name=p_name)

        try:
            p_key = genome.p_key
        except AttributeError:
            p_key = genome.key
        p.p_key = p_key

        try:
            p.bl_cutoff = genome.bl_cutoff
        except AttributeError:
            p.bl_cutoff = False

        ob = ob_m.instance
        if not ob:
            ob = ob_m.OpeningsBook()
            ob_m.instance = ob
        p.set_use_openings_book(ob)

        p.force_depth = genome.force_depth
        p.set_max_depth(genome.max_depth + genome.max_depth_boost)
        self.set_utility_config(genome, p)
        p.genome = genome
        return p

    def set_utility_config(self, genome, player):
        uc = player.get_utility_calculator()
        uc.capture_score_base = genome.capture_score_base
        uc.take_score_base = genome.take_score_base
        uc.threat_score_base = genome.threat_score_base
        uc.captures_scale = genome.captures_scale
        uc.move_factor = genome.move_factor
        uc.length_factor = genome.length_factor
        # TODO: Cull
        uc.use_net_captures = genome.use_net_captures
        uc.length_scale = genome.length_scale
        uc.scale_pob = genome.scale_pob
        uc.calc_mode = genome.calc_mode
        try:
            uc.enclosed_four_base = genome.enclosed_four_base
        except AttributeError:
            uc.enclosed_four_base = genome.enclosed_four_base = 400
        uc.judgement = genome.judgement
        try:
            uc.checkerboard_value = genome.checkerboard_value
        except AttributeError:
            uc.checkerboard_value = 0

        '''
        # Example of how to handle new fields:
        try:
            uc.length_scale = genome.length_scale
        except AttributeError:
            uc.length_scale = genome.length_scale = [1, 1, 1, 1, 1, 1]
        # Then run upgrade_dbs.py
        '''
mit
-9,205,106,758,263,038,000
27.547368
88
0.581121
false
3.454777
false
false
false
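The repeated try/except AttributeError blocks in create_player are a hand-rolled getattr-with-default. A compact equivalent; the Genome stand-in and its defaults are assumptions taken from the code above, not from the pentai package itself:

class Genome:
    name = "g1"
    key = 42  # no p_name / p_key / vision set

g = Genome()
vision = getattr(g, "vision", 100)    # falls back to the default, like the except branch
p_name = getattr(g, "p_name", g.name)
p_key = getattr(g, "p_key", g.key)
assert (vision, p_name, p_key) == (100, "g1", 42)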
atbentley/plank
setup.py
1
1311
import os
import re

from setuptools import setup

def read(path):
    with open(os.path.join(os.path.dirname(__file__), path), 'r') as f:
        data = f.read()
    return data.strip()

def get_version():
    _version_re = re.compile(r'\s*__version__\s*=\s*\'(.*)\'\s*')
    return _version_re.findall(read(os.path.join('plank', '__init__.py')))[0]

install_requires = read('requirements.txt').split('\n')
test_requires = read('build-requirements.txt').split('\n')
test_requires.extend(install_requires)

setup(
    name='plank',
    version=get_version(),
    url='http://github.com/atbentley/plank/',
    license='MIT',
    author='Andrew Bentley',
    author_email='[email protected]',
    description="A simple task and build runner that doesn't get in the way.",
    long_description=read('README.rst'),
    packages=['plank'],
    entry_points={'console_scripts': ['plank = plank.cli:main']},
    include_package_data=True,
    zip_safe=False,
    platforms='any',
    install_requires=install_requires,
    tests_require=test_requires,
    classifiers=[
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.5'
    ]
)
mit
8,118,092,918,294,471,000
28.133333
78
0.633105
false
3.459103
false
false
false
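get_version above pulls __version__ out of plank/__init__.py with a regex rather than importing the package, which would fail before its dependencies are installed. The pattern in isolation, runnable as-is (the sample source string is illustrative):

import re

_version_re = re.compile(r"\s*__version__\s*=\s*\'(.*)\'\s*")

source = "__version__ = '1.2.3'\n"
print(_version_re.findall(source)[0])  # 1.2.3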
d33tah/macd
macd/views.py
1
3323
import datetime
import subprocess

from macd.models import SeenEvent, Device
from django.shortcuts import render
from django.utils import timezone

def index(request):
    now = timezone.now()
    time_threshold = now - datetime.timedelta(minutes=10)
    items = SeenEvent.objects.filter(date__gte=time_threshold)
    devices_set = set(item.mac.device for item in items
                      if not item.mac.device.ignored)
    devices = []
    two_minutes = now - datetime.timedelta(minutes=2)
    macdb = open("/usr/share/nmap/nmap-mac-prefixes").readlines()
    for device in devices_set:
        found_2min = False
        earliest_since = None
        macs = device.mac_set.all()
        items_for_mac = SeenEvent.objects.filter(mac__in=macs)[:10000]
        if len(items_for_mac) > 0:
            for i in range(1, len(items_for_mac)):
                curr, previous = items_for_mac[i].date, items_for_mac[i - 1].date
                difference = previous - curr
                if earliest_since is None or previous < earliest_since:
                    earliest_since = previous
                if difference > datetime.timedelta(minutes=10):
                    break
            if items_for_mac[0].date > two_minutes:
                found_2min = True
        name = str(device)
        vendor = ''
        if name.endswith(" (?)") and len(name) == 21:
            mac_name = name.upper().replace(":", "")[:6]
            vendor = [" ".join(i.split()[1:]) for i in macdb
                      if i.split()[0] == mac_name][0]
        devices += [{
            'leaving': found_2min,
            'name': name,
            'vendor': vendor,
            'since': timezone.localtime(earliest_since) if earliest_since else ''
        }]

    last_event_time = SeenEvent.objects.latest('date').date

    viewer_ip = request.META['REMOTE_ADDR']
    viewer_mac = ''
    if (viewer_ip.startswith('192.168.') or
            viewer_ip.startswith('172.16.') or
            viewer_ip.startswith('10.')):
        arp_output = subprocess.check_output(['/usr/sbin/arp', '-n'])
        arp_data_lines = [i for i in arp_output.split("\n")[1:] if i != '']
        arp_macs = {cols[0]: cols[2]
                    for line in arp_data_lines
                    for cols in [line.split()]}
        viewer_mac = arp_macs.get(viewer_ip, '')

    viewer_mac_unknown = list(Device.objects.filter(description='',
                                                    mac=viewer_mac))

    return render(request, 'macd/index.html', {
        'devices': devices,
        'last_event': timezone.localtime(last_event_time),
        'viewer_mac': viewer_mac if viewer_mac_unknown else None,
    })

def unknown(request):
    macs = [m for d in Device.objects.filter(description='')
            for m in d.mac_set.all()]
    devices_dict = {mac: len(SeenEvent.objects.filter(mac=mac))
                    for mac in macs}
    devices = ["%s: %s" % (k, v)
               for k, v in reversed(sorted(devices_dict.items(),
                                           key=lambda x: x[1]))]
    last_event_time = SeenEvent.objects.latest('date').date
    return render(request, 'macd/index.html', {
        'devices': devices,
        'last_event': timezone.localtime(last_event_time)
    })
gpl-3.0
-2,758,185,787,429,000,700
37.195402
81
0.547397
false
3.784738
false
false
false
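index() maps the viewer's IP to a MAC by parsing arp -n output: skip the header row, split each data line into columns, and key column 0 (the IP) to column 2 (the MAC). A standalone sketch of that parsing against made-up sample output:

arp_output = (
    "Address        HWtype  HWaddress          Flags Mask  Iface\n"
    "192.168.88.10  ether   aa:bb:cc:dd:ee:ff  C           eth0\n"
    "192.168.88.11  ether   11:22:33:44:55:66  C           eth0\n"
)
lines = [l for l in arp_output.split("\n")[1:] if l != ""]
arp_macs = {cols[0]: cols[2] for line in lines for cols in [line.split()]}
assert arp_macs["192.168.88.10"] == "aa:bb:cc:dd:ee:ff"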
Esri/raster-functions
functions/BlockStatistics.py
1
4494
import numpy as np
from skimage.transform import resize
from skimage.util import view_as_blocks

class BlockStatistics():
    def __init__(self):
        self.name = "Block Statistics Function"
        self.description = ("Generates a downsampled output raster by computing a statistical "
                            "measure over non-overlapping square blocks of pixels in the input raster.")
        self.func = np.mean
        self.padding = 0

    def getParameterInfo(self):
        return [
            {
                'name': 'raster',
                'dataType': 'raster',
                'value': None,
                'required': True,
                'displayName': "Input Raster",
                'description': "The primary input raster over which block statistics is computed."
            },
            {
                'name': 'size',
                'dataType': 'numeric',
                'value': 1,
                'required': False,
                'displayName': "Block Size",
                'description': ("The number of pixels along each side of the square "
                                "non-overlapping block.")
            },
            {
                'name': 'measure',
                'dataType': 'string',
                'value': 'Mean',
                'required': False,
                'displayName': "Measure",
                'domain': ('Minimum', 'Maximum', 'Mean', 'Median', 'Sum', 'Nearest'),
                'description': ("The statistical measure computed over each "
                                "block of pixels in the input raster.")
            },
            {
                'name': 'factor',
                'dataType': 'numeric',
                'value': 1,
                'required': False,
                'displayName': "Downsampling Factor",
                'description': ("The integer factor by which the output raster is "
                                "downsampled relative to the input raster.")
            },
        ]

    def getConfiguration(self, **scalars):
        s = scalars.get('size', None)
        s = 3 if s is None else s
        self.padding = int(s / 2)
        return {
            'samplingFactor': scalars.get('size', 1.0),
            'inheritProperties': 4 | 8,         # inherit everything but the pixel type (1) and NoData (2)
            'invalidateProperties': 2 | 4 | 8,  # invalidate histogram, statistics, and key metadata
            'inputMask': True,
            'resampling': False,
            'padding': self.padding,
        }

    def updateRasterInfo(self, **kwargs):
        f = kwargs.get('factor', 1.0)
        kwargs['output_info']['cellSize'] = tuple(np.multiply(kwargs['raster_info']['cellSize'], f))
        kwargs['output_info']['pixelType'] = 'f4'  # output pixel values are floating-point
        kwargs['output_info']['statistics'] = ()
        kwargs['output_info']['histogram'] = ()

        m = kwargs.get('measure')
        m = m.lower() if m is not None and len(m) else 'mean'

        if m == 'minimum':
            self.func = np.min
        elif m == 'maximum':
            self.func = np.max
        elif m == 'mean':
            self.func = np.mean
        elif m == 'median':
            self.func = np.median
        elif m == 'sum':
            self.func = np.sum
        elif m == 'nearest':
            self.func = None

        return kwargs

    def updatePixels(self, tlc, shape, props, **pixelBlocks):
        p = pixelBlocks['raster_pixels']
        m = pixelBlocks['raster_mask']

        if self.func is None:
            b = resize(p, shape, order=0, preserve_range=True)
        else:
            blockSizes = tuple(np.divide(p.shape, shape))
            b = np.ma.masked_array(view_as_blocks(p, blockSizes),
                                   view_as_blocks(~m.astype('b1'), blockSizes))
            for i in range(len(b.shape) // 2):
                b = self.func(b, axis=-1)
            b = b.data

        d = self.padding
        pixelBlocks['output_pixels'] = b.astype(props['pixelType'], copy=False)
        pixelBlocks['output_mask'] = resize(m, shape, order=0, preserve_range=True).astype('u1', copy=False)
        return pixelBlocks

    def updateKeyMetadata(self, names, bandIndex, **keyMetadata):
        if bandIndex == -1:
            keyMetadata['datatype'] = 'Processed'
        return keyMetadata
apache-2.0
-8,694,753,335,761,033,000
37.078261
110
0.491322
false
4.467197
false
false
false
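updatePixels computes the statistic by viewing the raster as non-overlapping blocks and reducing over the block axes; the same windowing can be done with a plain numpy reshape when skimage is unavailable. A minimal sketch of the mean case (array contents are illustrative):

import numpy as np

def block_reduce_mean(p, size):
    # crop to a multiple of the block size, then expose the blocks as axes 1 and 3
    by, bx = p.shape[0] // size, p.shape[1] // size
    blocks = p[:by * size, :bx * size].reshape(by, size, bx, size)
    return blocks.mean(axis=(1, 3))

p = np.arange(16, dtype=float).reshape(4, 4)
print(block_reduce_mean(p, 2))  # [[ 2.5  4.5] [10.5 12.5]]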
