Columns: id (string, 28-33 chars) | content (string, 14-265k chars) | max_stars_repo_path (string, 49-55 chars)
crossvul-python_data_bad_3565_0
import json

import webob
from xml.dom import minidom
from xml.parsers import expat

import faults
from nova import exception
from nova import log as logging
from nova import utils
from nova import wsgi


XMLNS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0'
XMLNS_V11 = 'http://docs.openstack.org/compute/api/v1.1'
XMLNS_ATOM = 'http://www.w3.org/2005/Atom'

LOG = logging.getLogger('nova.api.openstack.wsgi')


class Request(webob.Request):
    """Add some Openstack API-specific logic to the base webob.Request."""

    def best_match_content_type(self, supported_content_types=None):
        """Determine the requested response content-type.

        Based on the query extension then the Accept header.

        """
        supported_content_types = supported_content_types or \
            ('application/json', 'application/xml')

        parts = self.path.rsplit('.', 1)
        if len(parts) > 1:
            ctype = 'application/{0}'.format(parts[1])
            if ctype in supported_content_types:
                return ctype

        bm = self.accept.best_match(supported_content_types)

        # default to application/json if we don't find a preference
        return bm or 'application/json'

    def get_content_type(self):
        """Determine content type of the request body.

        Does not do any body introspection, only checks header.

        """
        if not "Content-Type" in self.headers:
            return None

        allowed_types = ("application/xml", "application/json")
        content_type = self.content_type

        if content_type not in allowed_types:
            raise exception.InvalidContentType(content_type=content_type)

        return content_type


class ActionDispatcher(object):
    """Maps method name to local methods through action name."""

    def dispatch(self, *args, **kwargs):
        """Find and call local method."""
        action = kwargs.pop('action', 'default')
        action_method = getattr(self, str(action), self.default)
        return action_method(*args, **kwargs)

    def default(self, data):
        raise NotImplementedError()


class TextDeserializer(ActionDispatcher):
    """Default request body deserialization"""

    def deserialize(self, datastring, action='default'):
        return self.dispatch(datastring, action=action)

    def default(self, datastring):
        return {}


class JSONDeserializer(TextDeserializer):

    def _from_json(self, datastring):
        try:
            return utils.loads(datastring)
        except ValueError:
            msg = _("cannot understand JSON")
            raise exception.MalformedRequestBody(reason=msg)

    def default(self, datastring):
        return {'body': self._from_json(datastring)}


class XMLDeserializer(TextDeserializer):

    def __init__(self, metadata=None):
        """
        :param metadata: information needed to deserialize xml into
                         a dictionary.
        """
        super(XMLDeserializer, self).__init__()
        self.metadata = metadata or {}

    def _from_xml(self, datastring):
        plurals = set(self.metadata.get('plurals', {}))

        try:
            node = minidom.parseString(datastring).childNodes[0]
            return {node.nodeName: self._from_xml_node(node, plurals)}
        except expat.ExpatError:
            msg = _("cannot understand XML")
            raise exception.MalformedRequestBody(reason=msg)

    def _from_xml_node(self, node, listnames):
        """Convert a minidom node to a simple Python type.

        :param listnames: list of XML node names whose subnodes should
                          be considered list items.

        """
        if len(node.childNodes) == 1 and node.childNodes[0].nodeType == 3:
            return node.childNodes[0].nodeValue
        elif node.nodeName in listnames:
            return [self._from_xml_node(n, listnames)
                    for n in node.childNodes]
        else:
            result = dict()
            for attr in node.attributes.keys():
                result[attr] = node.attributes[attr].nodeValue
            for child in node.childNodes:
                if child.nodeType != node.TEXT_NODE:
                    result[child.nodeName] = self._from_xml_node(child,
                                                                 listnames)
            return result

    def find_first_child_named(self, parent, name):
        """Search a nodes children for the first child with a given name"""
        for node in parent.childNodes:
            if node.nodeName == name:
                return node
        return None

    def find_children_named(self, parent, name):
        """Return all of a nodes children who have the given name"""
        for node in parent.childNodes:
            if node.nodeName == name:
                yield node

    def extract_text(self, node):
        """Get the text field contained by the given node"""
        if len(node.childNodes) == 1:
            child = node.childNodes[0]
            if child.nodeType == child.TEXT_NODE:
                return child.nodeValue
        return ""

    def default(self, datastring):
        return {'body': self._from_xml(datastring)}


class MetadataXMLDeserializer(XMLDeserializer):

    def extract_metadata(self, metadata_node):
        """Marshal the metadata attribute of a parsed request"""
        metadata = {}
        if metadata_node is not None:
            for meta_node in self.find_children_named(metadata_node, "meta"):
                key = meta_node.getAttribute("key")
                metadata[key] = self.extract_text(meta_node)
        return metadata


class RequestHeadersDeserializer(ActionDispatcher):
    """Default request headers deserializer"""

    def deserialize(self, request, action):
        return self.dispatch(request, action=action)

    def default(self, request):
        return {}


class RequestDeserializer(object):
    """Break up a Request object into more useful pieces."""

    def __init__(self, body_deserializers=None, headers_deserializer=None,
                 supported_content_types=None):

        self.supported_content_types = supported_content_types or \
            ('application/json', 'application/xml')

        self.body_deserializers = {
            'application/xml': XMLDeserializer(),
            'application/json': JSONDeserializer(),
        }
        self.body_deserializers.update(body_deserializers or {})

        self.headers_deserializer = headers_deserializer or \
            RequestHeadersDeserializer()

    def deserialize(self, request):
        """Extract necessary pieces of the request.

        :param request: Request object
        :returns: tuple of (expected controller action name, dictionary of
                  keyword arguments to pass to the controller, the expected
                  content type of the response)

        """
        action_args = self.get_action_args(request.environ)
        action = action_args.pop('action', None)

        action_args.update(self.deserialize_headers(request, action))
        action_args.update(self.deserialize_body(request, action))

        accept = self.get_expected_content_type(request)

        return (action, action_args, accept)

    def deserialize_headers(self, request, action):
        return self.headers_deserializer.deserialize(request, action)

    def deserialize_body(self, request, action):
        try:
            content_type = request.get_content_type()
        except exception.InvalidContentType:
            LOG.debug(_("Unrecognized Content-Type provided in request"))
            return {}

        if content_type is None:
            LOG.debug(_("No Content-Type provided in request"))
            return {}

        if not len(request.body) > 0:
            LOG.debug(_("Empty body provided in request"))
            return {}

        try:
            deserializer = self.get_body_deserializer(content_type)
        except exception.InvalidContentType:
            LOG.debug(_("Unable to deserialize body as provided "
                        "Content-Type"))
            raise

        return deserializer.deserialize(request.body, action)

    def get_body_deserializer(self, content_type):
        try:
            return self.body_deserializers[content_type]
        except (KeyError, TypeError):
            raise exception.InvalidContentType(content_type=content_type)

    def get_expected_content_type(self, request):
        return request.best_match_content_type(self.supported_content_types)

    def get_action_args(self, request_environment):
        """Parse dictionary created by routes library."""
        try:
            args = request_environment['wsgiorg.routing_args'][1].copy()
        except Exception:
            return {}

        try:
            del args['controller']
        except KeyError:
            pass

        try:
            del args['format']
        except KeyError:
            pass

        return args


class DictSerializer(ActionDispatcher):
    """Default request body serialization"""

    def serialize(self, data, action='default'):
        return self.dispatch(data, action=action)

    def default(self, data):
        return ""


class JSONDictSerializer(DictSerializer):
    """Default JSON request body serialization"""

    def default(self, data):
        return utils.dumps(data)


class XMLDictSerializer(DictSerializer):

    def __init__(self, metadata=None, xmlns=None):
        """
        :param metadata: information needed to serialize a dictionary
                         into xml.
        :param xmlns: XML namespace to include with serialized xml
        """
        super(XMLDictSerializer, self).__init__()
        self.metadata = metadata or {}
        self.xmlns = xmlns

    def default(self, data):
        # We expect data to contain a single key which is the XML root.
        root_key = data.keys()[0]
        doc = minidom.Document()
        node = self._to_xml_node(doc, self.metadata, root_key, data[root_key])

        return self.to_xml_string(node)

    def to_xml_string(self, node, has_atom=False):
        self._add_xmlns(node, has_atom)
        return node.toprettyxml(indent='    ', encoding='UTF-8')

    #NOTE (ameade): the has_atom should be removed after all of the
    # xml serializers and view builders have been updated to the current
    # spec that required all responses include the xmlns:atom, the has_atom
    # flag is to prevent current tests from breaking
    def _add_xmlns(self, node, has_atom=False):
        if self.xmlns is not None:
            node.setAttribute('xmlns', self.xmlns)
        if has_atom:
            node.setAttribute('xmlns:atom', "http://www.w3.org/2005/Atom")

    def _to_xml_node(self, doc, metadata, nodename, data):
        """Recursive method to convert data members to XML nodes."""
        result = doc.createElement(nodename)

        # Set the xml namespace if one is specified
        # TODO(justinsb): We could also use prefixes on the keys
        xmlns = metadata.get('xmlns', None)
        if xmlns:
            result.setAttribute('xmlns', xmlns)

        #TODO(bcwaldon): accomplish this without a type-check
        if type(data) is list:
            collections = metadata.get('list_collections', {})
            if nodename in collections:
                metadata = collections[nodename]
                for item in data:
                    node = doc.createElement(metadata['item_name'])
                    node.setAttribute(metadata['item_key'], str(item))
                    result.appendChild(node)
                return result
            singular = metadata.get('plurals', {}).get(nodename, None)
            if singular is None:
                if nodename.endswith('s'):
                    singular = nodename[:-1]
                else:
                    singular = 'item'
            for item in data:
                node = self._to_xml_node(doc, metadata, singular, item)
                result.appendChild(node)
        #TODO(bcwaldon): accomplish this without a type-check
        elif type(data) is dict:
            collections = metadata.get('dict_collections', {})
            if nodename in collections:
                metadata = collections[nodename]
                for k, v in data.items():
                    node = doc.createElement(metadata['item_name'])
                    node.setAttribute(metadata['item_key'], str(k))
                    text = doc.createTextNode(str(v))
                    node.appendChild(text)
                    result.appendChild(node)
                return result
            attrs = metadata.get('attributes', {}).get(nodename, {})
            for k, v in data.items():
                if k in attrs:
                    result.setAttribute(k, str(v))
                else:
                    node = self._to_xml_node(doc, metadata, k, v)
                    result.appendChild(node)
        else:
            # Type is atom
            node = doc.createTextNode(str(data))
            result.appendChild(node)
        return result

    def _create_link_nodes(self, xml_doc, links):
        link_nodes = []
        for link in links:
            link_node = xml_doc.createElement('atom:link')
            link_node.setAttribute('rel', link['rel'])
            link_node.setAttribute('href', link['href'])
            if 'type' in link:
                link_node.setAttribute('type', link['type'])
            link_nodes.append(link_node)
        return link_nodes


class ResponseHeadersSerializer(ActionDispatcher):
    """Default response headers serialization"""

    def serialize(self, response, data, action):
        self.dispatch(response, data, action=action)

    def default(self, response, data):
        response.status_int = 200


class ResponseSerializer(object):
    """Encode the necessary pieces into a response object"""

    def __init__(self, body_serializers=None, headers_serializer=None):
        self.body_serializers = {
            'application/xml': XMLDictSerializer(),
            'application/json': JSONDictSerializer(),
        }
        self.body_serializers.update(body_serializers or {})

        self.headers_serializer = headers_serializer or \
            ResponseHeadersSerializer()

    def serialize(self, response_data, content_type, action='default'):
        """Serialize a dict into a string and wrap in a wsgi.Request object.

        :param response_data: dict produced by the Controller
        :param content_type: expected mimetype of serialized response body

        """
        response = webob.Response()
        self.serialize_headers(response, response_data, action)
        self.serialize_body(response, response_data, content_type, action)
        return response

    def serialize_headers(self, response, data, action):
        self.headers_serializer.serialize(response, data, action)

    def serialize_body(self, response, data, content_type, action):
        response.headers['Content-Type'] = content_type
        if data is not None:
            serializer = self.get_body_serializer(content_type)
            response.body = serializer.serialize(data, action)

    def get_body_serializer(self, content_type):
        try:
            return self.body_serializers[content_type]
        except (KeyError, TypeError):
            raise exception.InvalidContentType(content_type=content_type)


class Resource(wsgi.Application):
    """WSGI app that handles (de)serialization and controller dispatch.

    WSGI app that reads routing information supplied by RoutesMiddleware
    and calls the requested action method upon its controller.  All
    controller action methods must accept a 'req' argument, which is the
    incoming wsgi.Request. If the operation is a PUT or POST, the controller
    method must also accept a 'body' argument (the deserialized request
    body). They may raise a webob.exc exception or return a dict, which will
    be serialized by requested content type.

    """

    def __init__(self, controller, deserializer=None, serializer=None):
        """
        :param controller: object that implements methods created by
                           routes lib
        :param deserializer: object that can deserialize a webob request
                             into necessary pieces
        :param serializer: object that can serialize the output of a
                           controller into a webob response

        """
        self.controller = controller
        self.deserializer = deserializer or RequestDeserializer()
        self.serializer = serializer or ResponseSerializer()

    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, request):
        """WSGI method that controls (de)serialization and method dispatch."""

        LOG.info("%(method)s %(url)s" % {"method": request.method,
                                         "url": request.url})

        try:
            action, args, accept = self.deserializer.deserialize(request)
        except exception.InvalidContentType:
            msg = _("Unsupported Content-Type")
            return faults.Fault(webob.exc.HTTPBadRequest(explanation=msg))
        except exception.MalformedRequestBody:
            msg = _("Malformed request body")
            return faults.Fault(webob.exc.HTTPBadRequest(explanation=msg))

        project_id = args.pop("project_id", None)
        if 'nova.context' in request.environ and project_id:
            request.environ['nova.context'].project_id = project_id

        try:
            action_result = self.dispatch(request, action, args)
        except faults.Fault as ex:
            LOG.info(_("Fault thrown: %s"), unicode(ex))
            action_result = ex
        except webob.exc.HTTPException as ex:
            LOG.info(_("HTTP exception thrown: %s"), unicode(ex))
            action_result = faults.Fault(ex)

        if type(action_result) is dict or action_result is None:
            response = self.serializer.serialize(action_result,
                                                 accept,
                                                 action=action)
        else:
            response = action_result

        try:
            msg_dict = dict(url=request.url, status=response.status_int)
            msg = _("%(url)s returned with HTTP %(status)d") % msg_dict
        except AttributeError as e:
            msg_dict = dict(url=request.url, e=e)
            msg = _("%(url)s returned a fault: %(e)s") % msg_dict

        LOG.info(msg)

        return response

    def dispatch(self, request, action, action_args):
        """Find action-specific method on controller and call it."""

        controller_method = getattr(self.controller, action)
        try:
            return controller_method(req=request, **action_args)
        except TypeError as exc:
            LOG.exception(exc)
            return faults.Fault(webob.exc.HTTPBadRequest())
./CrossVul/dataset_final_sorted/CWE-264/py/bad_3565_0
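A hedged aside, not part of the dataset record above: a quick sketch of how the content negotiation in Request.best_match_content_type behaves. The request paths and the Accept header are invented examples, and the sketch assumes the Request class from the record is importable as written (Request.blank comes from the webob base class).

# Hypothetical usage sketch (Python 2, webob); nothing here is part of
# the CrossVul record itself.
req = Request.blank('/servers/detail.xml')
# A recognized ".json"/".xml" path extension wins over the Accept header.
assert req.best_match_content_type() == 'application/xml'

req = Request.blank('/servers/detail')
req.headers['Accept'] = 'application/json'
# Without an extension the Accept header decides; JSON is the fallback.
assert req.best_match_content_type() == 'application/json'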
crossvul-python_data_good_4833_0
#!/usr/bin/env python2
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
                        print_function)

__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'

import time, textwrap, json
from bisect import bisect_right
from base64 import b64encode
from future_builtins import map
from threading import Thread
from Queue import Queue, Empty
from functools import partial
from urlparse import urlparse

from PyQt5.Qt import (
    QWidget, QVBoxLayout, QApplication, QSize, QNetworkAccessManager, QMenu,
    QIcon, QNetworkReply, QTimer, QNetworkRequest, QUrl, Qt,
    QNetworkDiskCache, QToolBar, pyqtSlot, pyqtSignal)
from PyQt5.QtWebKitWidgets import QWebView, QWebInspector, QWebPage

from calibre import prints
from calibre.constants import iswindows
from calibre.ebooks.oeb.polish.parsing import parse
from calibre.ebooks.oeb.base import serialize, OEB_DOCS
from calibre.ptempfile import PersistentTemporaryDirectory
from calibre.gui2 import error_dialog, open_url, NO_URL_FORMATTING
from calibre.gui2.tweak_book import current_container, editors, tprefs, actions, TOP
from calibre.gui2.viewer.documentview import apply_settings
from calibre.gui2.viewer.config import config
from calibre.gui2.widgets2 import HistoryLineEdit2
from calibre.utils.ipc.simple_worker import offload_worker

shutdown = object()


def get_data(name):
    'Get the data for name. Returns a unicode string if name is a text document/stylesheet'
    if name in editors:
        return editors[name].get_raw_data()
    return current_container().raw_data(name)

# Parsing of html to add linenumbers {{{


def parse_html(raw):
    root = parse(raw, decoder=lambda x: x.decode('utf-8'),
                 line_numbers=True, linenumber_attribute='data-lnum')
    return serialize(root, 'text/html').encode('utf-8')


class ParseItem(object):

    __slots__ = ('name', 'length', 'fingerprint', 'parsing_done', 'parsed_data')

    def __init__(self, name):
        self.name = name
        self.length, self.fingerprint = 0, None
        self.parsed_data = None
        self.parsing_done = False

    def __repr__(self):
        return 'ParsedItem(name=%r, length=%r, fingerprint=%r, parsing_done=%r, parsed_data_is_None=%r)' % (
            self.name, self.length, self.fingerprint, self.parsing_done,
            self.parsed_data is None)


class ParseWorker(Thread):

    daemon = True
    SLEEP_TIME = 1

    def __init__(self):
        Thread.__init__(self)
        self.requests = Queue()
        self.request_count = 0
        self.parse_items = {}
        self.launch_error = None

    def run(self):
        mod, func = 'calibre.gui2.tweak_book.preview', 'parse_html'
        try:
            # Connect to the worker and send a dummy job to initialize it
            self.worker = offload_worker(priority='low')
            self.worker(mod, func, '<p></p>')
        except:
            import traceback
            traceback.print_exc()
            self.launch_error = traceback.format_exc()
            return

        while True:
            time.sleep(self.SLEEP_TIME)
            x = self.requests.get()
            requests = [x]
            while True:
                try:
                    requests.append(self.requests.get_nowait())
                except Empty:
                    break
            if shutdown in requests:
                self.worker.shutdown()
                break
            request = sorted(requests, reverse=True)[0]
            del requests
            pi, data = request[1:]
            try:
                res = self.worker(mod, func, data)
            except:
                import traceback
                traceback.print_exc()
            else:
                pi.parsing_done = True
                parsed_data = res['result']
                if res['tb']:
                    prints("Parser error:")
                    prints(res['tb'])
                else:
                    pi.parsed_data = parsed_data

    def add_request(self, name):
        data = get_data(name)
        ldata, hdata = len(data), hash(data)
        pi = self.parse_items.get(name, None)
        if pi is None:
            self.parse_items[name] = pi = ParseItem(name)
        else:
            if pi.parsing_done and pi.length == ldata and pi.fingerprint == hdata:
                return
            pi.parsed_data = None
            pi.parsing_done = False
        pi.length, pi.fingerprint = ldata, hdata
        self.requests.put((self.request_count, pi, data))
        self.request_count += 1

    def shutdown(self):
        self.requests.put(shutdown)

    def get_data(self, name):
        return getattr(self.parse_items.get(name, None), 'parsed_data', None)

    def clear(self):
        self.parse_items.clear()

    def is_alive(self):
        return Thread.is_alive(self) or (hasattr(self, 'worker') and self.worker.is_alive())


parse_worker = ParseWorker()
# }}}

# Override network access to load data "live" from the editors {{{


class NetworkReply(QNetworkReply):

    def __init__(self, parent, request, mime_type, name):
        QNetworkReply.__init__(self, parent)
        self.setOpenMode(QNetworkReply.ReadOnly | QNetworkReply.Unbuffered)
        self.setRequest(request)
        self.setUrl(request.url())
        self._aborted = False
        if mime_type in OEB_DOCS:
            self.resource_name = name
            QTimer.singleShot(0, self.check_for_parse)
        else:
            data = get_data(name)
            if isinstance(data, type('')):
                data = data.encode('utf-8')
                mime_type += '; charset=utf-8'
            self.__data = data
            self.setHeader(QNetworkRequest.ContentTypeHeader, mime_type)
            self.setHeader(QNetworkRequest.ContentLengthHeader, len(self.__data))
            QTimer.singleShot(0, self.finalize_reply)

    def check_for_parse(self):
        if self._aborted:
            return
        data = parse_worker.get_data(self.resource_name)
        if data is None:
            return QTimer.singleShot(10, self.check_for_parse)
        self.__data = data
        self.setHeader(QNetworkRequest.ContentTypeHeader,
                       'application/xhtml+xml; charset=utf-8')
        self.setHeader(QNetworkRequest.ContentLengthHeader, len(self.__data))
        self.finalize_reply()

    def bytesAvailable(self):
        try:
            return len(self.__data)
        except AttributeError:
            return 0

    def isSequential(self):
        return True

    def abort(self):
        self._aborted = True

    def readData(self, maxlen):
        ans, self.__data = self.__data[:maxlen], self.__data[maxlen:]
        return ans
    read = readData

    def finalize_reply(self):
        if self._aborted:
            return
        self.setFinished(True)
        self.setAttribute(QNetworkRequest.HttpStatusCodeAttribute, 200)
        self.setAttribute(QNetworkRequest.HttpReasonPhraseAttribute, "Ok")
        self.metaDataChanged.emit()
        self.downloadProgress.emit(len(self.__data), len(self.__data))
        self.readyRead.emit()
        self.finished.emit()


class NetworkAccessManager(QNetworkAccessManager):

    OPERATION_NAMES = {getattr(QNetworkAccessManager, '%sOperation' % x): x.upper()
                       for x in ('Head', 'Get', 'Put', 'Post', 'Delete', 'Custom')}

    def __init__(self, *args):
        QNetworkAccessManager.__init__(self, *args)
        self.current_root = None
        self.cache = QNetworkDiskCache(self)
        self.setCache(self.cache)
        self.cache.setCacheDirectory(PersistentTemporaryDirectory(prefix='disk_cache_'))
        self.cache.setMaximumCacheSize(0)

    def createRequest(self, operation, request, data):
        url = unicode(request.url().toString(NO_URL_FORMATTING))
        if operation == self.GetOperation and url.startswith('file://'):
            path = url[7:]
            if iswindows and path.startswith('/'):
                path = path[1:]
            c = current_container()
            try:
                name = c.abspath_to_name(path, root=self.current_root)
            except ValueError:
                # Happens on windows with absolute paths on different drives
                name = None
            if c.has_name(name):
                try:
                    return NetworkReply(self, request,
                                        c.mime_map.get(name, 'application/octet-stream'),
                                        name)
                except Exception:
                    import traceback
                    traceback.print_exc()
        return QNetworkAccessManager.createRequest(self, operation, request,
                                                   data)

# }}}


def uniq(vals):
    ''' Remove all duplicates from vals, while preserving order.  '''
    vals = vals or ()
    seen = set()
    seen_add = seen.add
    return tuple(x for x in vals if x not in seen and not seen_add(x))


def find_le(a, x):
    'Find rightmost value in a less than or equal to x'
    try:
        return a[bisect_right(a, x)]
    except IndexError:
        return a[-1]


class WebPage(QWebPage):

    sync_requested = pyqtSignal(object, object, object)
    split_requested = pyqtSignal(object, object)

    def __init__(self, parent):
        QWebPage.__init__(self, parent)
        settings = self.settings()
        apply_settings(settings, config().parse())
        settings.setMaximumPagesInCache(0)
        settings.setAttribute(settings.JavaEnabled, False)
        settings.setAttribute(settings.PluginsEnabled, False)
        settings.setAttribute(settings.PrivateBrowsingEnabled, True)
        settings.setAttribute(settings.JavascriptCanOpenWindows, False)
        settings.setAttribute(settings.JavascriptCanAccessClipboard, False)
        # ensure javascript cannot read from local files
        settings.setAttribute(settings.LocalContentCanAccessFileUrls, False)
        settings.setAttribute(settings.LinksIncludedInFocusChain, False)
        settings.setAttribute(settings.DeveloperExtrasEnabled, True)
        settings.setDefaultTextEncoding('utf-8')
        data = 'data:text/css;charset=utf-8;base64,'
        css = '[data-in-split-mode="1"] [data-is-block="1"]:hover { cursor: pointer !important; border-top: solid 5px green !important }'
        data += b64encode(css.encode('utf-8'))
        settings.setUserStyleSheetUrl(QUrl(data))

        self.setNetworkAccessManager(NetworkAccessManager(self))
        self.setLinkDelegationPolicy(self.DelegateAllLinks)
        self.mainFrame().javaScriptWindowObjectCleared.connect(self.init_javascript)
        self.init_javascript()

    @dynamic_property
    def current_root(self):
        def fget(self):
            return self.networkAccessManager().current_root

        def fset(self, val):
            self.networkAccessManager().current_root = val
        return property(fget=fget, fset=fset)

    def javaScriptConsoleMessage(self, msg, lineno, source_id):
        prints('preview js:%s:%s:' % (unicode(source_id), lineno), unicode(msg))

    def init_javascript(self):
        if not hasattr(self, 'js'):
            from calibre.utils.resources import compiled_coffeescript
            self.js = compiled_coffeescript('ebooks.oeb.display.utils', dynamic=False)
            self.js += P('csscolorparser.js', data=True, allow_user_override=False)
            self.js += compiled_coffeescript('ebooks.oeb.polish.preview', dynamic=False)
        self._line_numbers = None
        mf = self.mainFrame()
        mf.addToJavaScriptWindowObject("py_bridge", self)
        mf.evaluateJavaScript(self.js)

    @pyqtSlot(str, str, str)
    def request_sync(self, tag_name, href, sourceline_address):
        try:
            self.sync_requested.emit(unicode(tag_name), unicode(href),
                                     json.loads(unicode(sourceline_address)))
        except (TypeError, ValueError, OverflowError, AttributeError):
            pass

    def go_to_anchor(self, anchor, lnum):
        self.mainFrame().evaluateJavaScript(
            'window.calibre_preview_integration.go_to_anchor(%s, %s)' % (
                json.dumps(anchor), json.dumps(str(lnum))))

    @pyqtSlot(str, str)
    def request_split(self, loc, totals):
        actions['split-in-preview'].setChecked(False)
        loc, totals = json.loads(unicode(loc)), json.loads(unicode(totals))
        if not loc or not totals:
            return error_dialog(self.view(), _('Invalid location'),
                                _('Cannot split on the body tag'), show=True)
        self.split_requested.emit(loc, totals)

    @property
    def line_numbers(self):
        if self._line_numbers is None:
            def atoi(x):
                try:
                    ans = int(x)
                except (TypeError, ValueError):
                    ans = None
                return ans
            val = self.mainFrame().evaluateJavaScript(
                'window.calibre_preview_integration.line_numbers()')
            self._line_numbers = sorted(uniq(filter(lambda x: x is not None,
                                                    map(atoi, val))))
        return self._line_numbers

    def go_to_line(self, lnum):
        try:
            lnum = find_le(self.line_numbers, lnum)
        except IndexError:
            return
        self.mainFrame().evaluateJavaScript(
            'window.calibre_preview_integration.go_to_line(%d)' % lnum)

    def go_to_sourceline_address(self, sourceline_address):
        lnum, tags = sourceline_address
        if lnum is None:
            return
        tags = [x.lower() for x in tags]
        self.mainFrame().evaluateJavaScript(
            'window.calibre_preview_integration.go_to_sourceline_address(%d, %s)' % (
                lnum, json.dumps(tags)))

    def split_mode(self, enabled):
        self.mainFrame().evaluateJavaScript(
            'window.calibre_preview_integration.split_mode(%s)' % (
                'true' if enabled else 'false'))


class WebView(QWebView):

    def __init__(self, parent=None):
        QWebView.__init__(self, parent)
        self.inspector = QWebInspector(self)
        w = QApplication.instance().desktop().availableGeometry(self).width()
        self._size_hint = QSize(int(w/3), int(w/2))
        self._page = WebPage(self)
        self.setPage(self._page)
        self.inspector.setPage(self._page)
        self.clear()
        self.setAcceptDrops(False)

    def sizeHint(self):
        return self._size_hint

    def refresh(self):
        self.pageAction(self.page().Reload).trigger()

    @dynamic_property
    def scroll_pos(self):
        def fget(self):
            mf = self.page().mainFrame()
            return (mf.scrollBarValue(Qt.Horizontal),
                    mf.scrollBarValue(Qt.Vertical))

        def fset(self, val):
            mf = self.page().mainFrame()
            mf.setScrollBarValue(Qt.Horizontal, val[0])
            mf.setScrollBarValue(Qt.Vertical, val[1])
        return property(fget=fget, fset=fset)

    def clear(self):
        self.setHtml(_(
            '''
            <h3>Live preview</h3>

            <p>Here you will see a live preview of the HTML file you are
            currently editing. The preview will update automatically as you
            make changes.

            <p style="font-size:x-small; color: gray">Note that this is a
            quick preview only, it is not intended to simulate an actual
            ebook reader. Some aspects of your ebook will not work, such as
            page breaks and page margins.
            '''))
        self.page().current_root = None

    def setUrl(self, qurl):
        self.page().current_root = current_container().root
        return QWebView.setUrl(self, qurl)

    def inspect(self):
        self.inspector.parent().show()
        self.inspector.parent().raise_()
        self.pageAction(self.page().InspectElement).trigger()

    def contextMenuEvent(self, ev):
        menu = QMenu(self)
        p = self.page()
        mf = p.mainFrame()
        r = mf.hitTestContent(ev.pos())
        url = unicode(r.linkUrl().toString(NO_URL_FORMATTING)).strip()
        ca = self.pageAction(QWebPage.Copy)
        if ca.isEnabled():
            menu.addAction(ca)
        menu.addAction(actions['reload-preview'])
        menu.addAction(QIcon(I('debug.png')), _('Inspect element'), self.inspect)
        if url.partition(':')[0].lower() in {'http', 'https'}:
            menu.addAction(_('Open link'), partial(open_url, r.linkUrl()))
        menu.exec_(ev.globalPos())


class Preview(QWidget):

    sync_requested = pyqtSignal(object, object)
    split_requested = pyqtSignal(object, object, object)
    split_start_requested = pyqtSignal()
    link_clicked = pyqtSignal(object, object)
    refresh_starting = pyqtSignal()
    refreshed = pyqtSignal()

    def __init__(self, parent=None):
        QWidget.__init__(self, parent)
        self.l = l = QVBoxLayout()
        self.setLayout(l)
        l.setContentsMargins(0, 0, 0, 0)
        self.view = WebView(self)
        self.view.page().sync_requested.connect(self.request_sync)
        self.view.page().split_requested.connect(self.request_split)
        self.view.page().loadFinished.connect(self.load_finished)
        self.inspector = self.view.inspector
        self.inspector.setPage(self.view.page())
        l.addWidget(self.view)
        self.bar = QToolBar(self)
        l.addWidget(self.bar)

        ac = actions['auto-reload-preview']
        ac.setCheckable(True)
        ac.setChecked(True)
        ac.toggled.connect(self.auto_reload_toggled)
        self.auto_reload_toggled(ac.isChecked())
        self.bar.addAction(ac)

        ac = actions['sync-preview-to-editor']
        ac.setCheckable(True)
        ac.setChecked(True)
        ac.toggled.connect(self.sync_toggled)
        self.sync_toggled(ac.isChecked())
        self.bar.addAction(ac)

        self.bar.addSeparator()

        ac = actions['split-in-preview']
        ac.setCheckable(True)
        ac.setChecked(False)
        ac.toggled.connect(self.split_toggled)
        self.split_toggled(ac.isChecked())
        self.bar.addAction(ac)

        ac = actions['reload-preview']
        ac.triggered.connect(self.refresh)
        self.bar.addAction(ac)

        actions['preview-dock'].toggled.connect(self.visibility_changed)

        self.current_name = None
        self.last_sync_request = None
        self.refresh_timer = QTimer(self)
        self.refresh_timer.timeout.connect(self.refresh)
        parse_worker.start()
        self.current_sync_request = None

        self.search = HistoryLineEdit2(self)
        self.search.initialize('tweak_book_preview_search')
        self.search.setPlaceholderText(_('Search in preview'))
        self.search.returnPressed.connect(partial(self.find, 'next'))
        self.bar.addSeparator()
        self.bar.addWidget(self.search)
        for d in ('next', 'prev'):
            ac = actions['find-%s-preview' % d]
            ac.triggered.connect(partial(self.find, d))
            self.bar.addAction(ac)

    def find(self, direction):
        text = unicode(self.search.text())
        self.view.findText(text, QWebPage.FindWrapsAroundDocument | (
            QWebPage.FindBackward if direction == 'prev' else QWebPage.FindFlags(0)))

    def request_sync(self, tagname, href, lnum):
        if self.current_name:
            c = current_container()
            if tagname == 'a' and href:
                if href and href.startswith('#'):
                    name = self.current_name
                else:
                    name = c.href_to_name(href, self.current_name) if href else None
                if name == self.current_name:
                    return self.view.page().go_to_anchor(urlparse(href).fragment, lnum)
                if name and c.exists(name) and c.mime_map[name] in OEB_DOCS:
                    return self.link_clicked.emit(name, urlparse(href).fragment or TOP)
            self.sync_requested.emit(self.current_name, lnum)

    def request_split(self, loc, totals):
        if self.current_name:
            self.split_requested.emit(self.current_name, loc, totals)

    def sync_to_editor(self, name, sourceline_address):
        self.current_sync_request = (name, sourceline_address)
        QTimer.singleShot(100, self._sync_to_editor)

    def _sync_to_editor(self):
        if not actions['sync-preview-to-editor'].isChecked():
            return
        try:
            if self.refresh_timer.isActive() or self.current_sync_request[0] != self.current_name:
                return QTimer.singleShot(100, self._sync_to_editor)
        except TypeError:
            return  # Happens if current_sync_request is None
        sourceline_address = self.current_sync_request[1]
        self.current_sync_request = None
        self.view.page().go_to_sourceline_address(sourceline_address)

    def report_worker_launch_error(self):
        if parse_worker.launch_error is not None:
            tb, parse_worker.launch_error = parse_worker.launch_error, None
            error_dialog(self, _('Failed to launch worker'), _(
                'Failed to launch the worker process used for rendering the preview'),
                det_msg=tb, show=True)

    def show(self, name):
        if name != self.current_name:
            self.refresh_timer.stop()
            self.current_name = name
            self.report_worker_launch_error()
            parse_worker.add_request(name)
            self.view.setUrl(QUrl.fromLocalFile(current_container().name_to_abspath(name)))
            return True

    def refresh(self):
        if self.current_name:
            self.refresh_timer.stop()
            # This will check if the current html has changed in its editor,
            # and re-parse it if so
            self.report_worker_launch_error()
            parse_worker.add_request(self.current_name)
            # Tell webkit to reload all html and associated resources
            current_url = QUrl.fromLocalFile(current_container().name_to_abspath(self.current_name))
            self.refresh_starting.emit()
            if current_url != self.view.url():
                # The container was changed
                self.view.setUrl(current_url)
            else:
                self.view.refresh()
            self.refreshed.emit()

    def clear(self):
        self.view.clear()
        self.current_name = None

    @property
    def current_root(self):
        return self.view.page().current_root

    @property
    def is_visible(self):
        return actions['preview-dock'].isChecked()

    @property
    def live_css_is_visible(self):
        try:
            return actions['live-css-dock'].isChecked()
        except KeyError:
            return False

    def start_refresh_timer(self):
        if self.live_css_is_visible or (self.is_visible and actions['auto-reload-preview'].isChecked()):
            self.refresh_timer.start(tprefs['preview_refresh_time'] * 1000)

    def stop_refresh_timer(self):
        self.refresh_timer.stop()

    def auto_reload_toggled(self, checked):
        if self.live_css_is_visible and not actions['auto-reload-preview'].isChecked():
            actions['auto-reload-preview'].setChecked(True)
            error_dialog(self, _('Cannot disable'), _(
                'Auto reloading of the preview panel cannot be disabled while the'
                ' Live CSS panel is open.'), show=True)
        actions['auto-reload-preview'].setToolTip(_(
            'Auto reload preview when text changes in editor') if not checked else _(
                'Disable auto reload of preview'))

    def sync_toggled(self, checked):
        actions['sync-preview-to-editor'].setToolTip(_(
            'Disable syncing of preview position to editor position') if checked else _(
                'Enable syncing of preview position to editor position'))

    def visibility_changed(self, is_visible):
        if is_visible:
            self.refresh()

    def split_toggled(self, checked):
        actions['split-in-preview'].setToolTip(textwrap.fill(_(
            'Abort file split') if checked else _(
                'Split this file at a specified location.\n\nAfter clicking this button, click'
                ' inside the preview panel above at the location you want the file to be split.')))
        if checked:
            self.split_start_requested.emit()
        else:
            self.view.page().split_mode(False)

    def do_start_split(self):
        self.view.page().split_mode(True)

    def stop_split(self):
        actions['split-in-preview'].setChecked(False)

    def load_finished(self, ok):
        if actions['split-in-preview'].isChecked():
            if ok:
                self.do_start_split()
            else:
                self.stop_split()

    def apply_settings(self):
        s = self.view.page().settings()
        s.setFontSize(s.DefaultFontSize, tprefs['preview_base_font_size'])
        s.setFontSize(s.DefaultFixedFontSize, tprefs['preview_mono_font_size'])
        s.setFontSize(s.MinimumLogicalFontSize, tprefs['preview_minimum_font_size'])
        s.setFontSize(s.MinimumFontSize, tprefs['preview_minimum_font_size'])
        sf, ssf, mf = tprefs['preview_serif_family'], tprefs['preview_sans_family'], tprefs['preview_mono_family']
        s.setFontFamily(s.StandardFont, {'serif': sf, 'sans': ssf, 'mono': mf, None: sf}[tprefs['preview_standard_font_family']])
        s.setFontFamily(s.SerifFont, sf)
        s.setFontFamily(s.SansSerifFont, ssf)
        s.setFontFamily(s.FixedFont, mf)
./CrossVul/dataset_final_sorted/CWE-264/py/good_4833_0
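A hedged aside, not part of the dataset record above: the queue-draining pattern in ParseWorker.run is worth isolating. Each request enqueued by add_request carries an increasing counter, so after draining the backlog only the newest request needs to be parsed. The queue contents below are invented for illustration.

# Standalone Python 2 sketch of the coalescing logic; the payload strings
# are made up and stand in for the (request_count, ParseItem, data) tuples.
from Queue import Queue, Empty

q = Queue()
for count, payload in enumerate(['stale edit', 'staler edit', 'latest edit']):
    q.put((count, payload))

requests = [q.get()]   # block until at least one request arrives
while True:            # then drain whatever else has piled up
    try:
        requests.append(q.get_nowait())
    except Empty:
        break

# The highest counter wins, so intermediate edits are never parsed.
assert sorted(requests, reverse=True)[0] == (2, 'latest edit')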
crossvul-python_data_bad_3784_0
# Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy
import datetime
import json
import re
import urllib

import webob.exc

from glance.api import policy
import glance.api.v2 as v2
from glance.common import exception
from glance.common import utils
from glance.common import wsgi
import glance.db
import glance.notifier
from glance.openstack.common import cfg
import glance.openstack.common.log as logging
from glance.openstack.common import timeutils
import glance.schema
import glance.store

LOG = logging.getLogger(__name__)
CONF = cfg.CONF


class ImagesController(object):
    def __init__(self, db_api=None, policy_enforcer=None, notifier=None,
                 store_api=None):
        self.db_api = db_api or glance.db.get_api()
        self.db_api.configure_db()
        self.policy = policy_enforcer or policy.Enforcer()
        self.notifier = notifier or glance.notifier.Notifier()
        self.store_api = store_api or glance.store
        self.store_api.create_stores()

    def _enforce(self, req, action):
        """Authorize an action against our policies"""
        try:
            self.policy.enforce(req.context, action, {})
        except exception.Forbidden:
            raise webob.exc.HTTPForbidden()

    def _normalize_properties(self, image):
        """Convert the properties from the stored format to a dict

        The db api returns a list of dicts that look like
        {'name': <key>, 'value': <value>}, while it expects a format like
        {<key>: <value>} in image create and update calls. This function
        takes the extra step that the db api should be responsible for in
        the image get calls. The db api will also return deleted image
        properties that must be filtered out.
        """
        properties = [(p['name'], p['value'])
                      for p in image['properties'] if not p['deleted']]
        image['properties'] = dict(properties)
        return image

    def _extract_tags(self, image):
        try:
            #NOTE(bcwaldon): cast to set to make the list unique, then
            # cast back to list since that's a more sane response type
            return list(set(image.pop('tags')))
        except KeyError:
            pass

    def _append_tags(self, context, image):
        image['tags'] = self.db_api.image_tag_get_all(context, image['id'])
        return image

    @utils.mutating
    def create(self, req, image):
        self._enforce(req, 'add_image')
        is_public = image.get('is_public')
        if is_public:
            self._enforce(req, 'publicize_image')
        image['owner'] = req.context.owner
        image['status'] = 'queued'

        tags = self._extract_tags(image)

        image = dict(self.db_api.image_create(req.context, image))

        if tags is not None:
            self.db_api.image_tag_set_all(req.context, image['id'], tags)
            image['tags'] = tags
        else:
            image['tags'] = []

        image = self._normalize_properties(dict(image))
        self.notifier.info('image.update', image)
        return image

    def index(self, req, marker=None, limit=None, sort_key='created_at',
              sort_dir='desc', filters={}):
        self._enforce(req, 'get_images')
        filters['deleted'] = False
        result = {}
        if limit is None:
            limit = CONF.limit_param_default
        limit = min(CONF.api_limit_max, limit)
        try:
            images = self.db_api.image_get_all(req.context, filters=filters,
                                               marker=marker, limit=limit,
                                               sort_key=sort_key,
                                               sort_dir=sort_dir)
            if len(images) != 0 and len(images) == limit:
                result['next_marker'] = images[-1]['id']
        except exception.InvalidFilterRangeValue as e:
            raise webob.exc.HTTPBadRequest(explanation=unicode(e))
        except exception.InvalidSortKey as e:
            raise webob.exc.HTTPBadRequest(explanation=unicode(e))
        except exception.NotFound as e:
            raise webob.exc.HTTPBadRequest(explanation=unicode(e))
        images = [self._normalize_properties(dict(image)) for image in images]
        result['images'] = [self._append_tags(req.context, image)
                            for image in images]
        return result

    def _get_image(self, context, image_id):
        try:
            image = self.db_api.image_get(context, image_id)
            if image['deleted']:
                raise exception.NotFound()
        except (exception.NotFound, exception.Forbidden):
            raise webob.exc.HTTPNotFound()
        else:
            return dict(image)

    def show(self, req, image_id):
        self._enforce(req, 'get_image')
        image = self._get_image(req.context, image_id)
        image = self._normalize_properties(image)
        return self._append_tags(req.context, image)

    @utils.mutating
    def update(self, req, image_id, changes):
        self._enforce(req, 'modify_image')
        context = req.context

        image = self._get_image(context, image_id)
        image = self._normalize_properties(image)

        updates = self._extract_updates(req, image, changes)

        tags = None
        if len(updates) > 0:
            tags = self._extract_tags(updates)
            purge_props = 'properties' in updates
            try:
                image = self.db_api.image_update(context, image_id, updates,
                                                 purge_props)
            except (exception.NotFound, exception.Forbidden):
                raise webob.exc.HTTPNotFound()
            image = self._normalize_properties(dict(image))

        if tags is not None:
            self.db_api.image_tag_set_all(req.context, image_id, tags)
            image['tags'] = tags
        else:
            self._append_tags(req.context, image)

        self.notifier.info('image.update', image)
        return image

    def _extract_updates(self, req, image, changes):
        """Determine the updates to pass to the database api.

        Given the current image, convert a list of changes to be made into
        the corresponding update dictionary that should be passed to
        db_api.image_update.

        Changes have the following parts:
        op - 'add' a new attribute, 'replace' an existing attribute, or
             'remove' an existing attribute.
        path - A list of path parts for determining which attribute the
               operation applies to.
        value - For 'add' and 'replace', the new value the attribute should
                assume.

        For the current use case, there are two types of valid paths. For
        base attributes (fields stored directly on the Image object) the
        path must take the form ['<attribute name>']. These attributes are
        always present so the only valid operation on them is 'replace'.
        For image properties, the path takes the form
        ['properties', '<property name>'] and all operations are valid.

        Future refactoring should simplify this code by hardening the image
        abstraction such that database details such as how image properties
        are stored do not have any influence here.
        """
        updates = {}
        property_updates = image['properties']
        for change in changes:
            path = change['path']
            if len(path) == 1:
                assert change['op'] == 'replace'
                key = change['path'][0]
                if key == 'is_public' and change['value']:
                    self._enforce(req, 'publicize_image')
                updates[key] = change['value']
            else:
                assert len(path) == 2
                assert path[0] == 'properties'
                update_method_name = '_do_%s_property' % change['op']
                assert hasattr(self, update_method_name)
                update_method = getattr(self, update_method_name)
                update_method(property_updates, change)
                updates['properties'] = property_updates
        return updates

    def _do_replace_property(self, updates, change):
        """Replace a single image property, ensuring it's present."""
        key = change['path'][1]
        if key not in updates:
            msg = _("Property %s does not exist.")
            raise webob.exc.HTTPConflict(msg % key)
        updates[key] = change['value']

    def _do_add_property(self, updates, change):
        """Add a new image property, ensuring it does not already exist."""
        key = change['path'][1]
        if key in updates:
            msg = _("Property %s already present.")
            raise webob.exc.HTTPConflict(msg % key)
        updates[key] = change['value']

    def _do_remove_property(self, updates, change):
        """Remove an image property, ensuring it's present."""
        key = change['path'][1]
        if key not in updates:
            msg = _("Property %s does not exist.")
            raise webob.exc.HTTPConflict(msg % key)
        del updates[key]

    @utils.mutating
    def delete(self, req, image_id):
        self._enforce(req, 'delete_image')
        image = self._get_image(req.context, image_id)

        if image['protected']:
            msg = _("Unable to delete as image %(image_id)s is protected"
                    % locals())
            raise webob.exc.HTTPForbidden(explanation=msg)

        status = 'deleted'
        if image['location']:
            if CONF.delayed_delete:
                status = 'pending_delete'
                self.store_api.schedule_delayed_delete_from_backend(
                    image['location'], id)
            else:
                self.store_api.safe_delete_from_backend(image['location'],
                                                        req.context, id)

        try:
            self.db_api.image_update(req.context, image_id,
                                     {'status': status})
            self.db_api.image_destroy(req.context, image_id)
        except (exception.NotFound, exception.Forbidden):
            msg = ("Failed to find image %(image_id)s to delete" % locals())
            LOG.info(msg)
            raise webob.exc.HTTPNotFound()
        else:
            self.notifier.info('image.delete', image)


class RequestDeserializer(wsgi.JSONRequestDeserializer):

    _readonly_properties = ['created_at', 'updated_at', 'status', 'checksum',
                            'size', 'direct_url', 'self', 'file', 'schema']
    _reserved_properties = ['owner', 'is_public', 'location', 'deleted',
                            'deleted_at']
    _base_properties = ['checksum', 'created_at', 'container_format',
                        'disk_format', 'id', 'min_disk', 'min_ram', 'name',
                        'size', 'status', 'tags', 'updated_at', 'visibility',
                        'protected']

    def __init__(self, schema=None):
        super(RequestDeserializer, self).__init__()
        self.schema = schema or get_schema()

    def _parse_image(self, request):
        body = self._get_request_body(request)
        try:
            self.schema.validate(body)
        except exception.InvalidObject as e:
            raise webob.exc.HTTPBadRequest(explanation=unicode(e))

        # Ensure all specified properties are allowed
        self._check_readonly(body)
        self._check_reserved(body)

        # Create a dict of base image properties, with user- and deployer-
        # defined properties contained in a 'properties' dictionary
        image = {'properties': body}
        for key in self._base_properties:
            try:
                image[key] = image['properties'].pop(key)
            except KeyError:
                pass

        if 'visibility' in image:
            image['is_public'] = image.pop('visibility') == 'public'

        return {'image': image}

    def _get_request_body(self, request):
        output = super(RequestDeserializer, self).default(request)
        if not 'body' in output:
            msg = _('Body expected in request.')
            raise webob.exc.HTTPBadRequest(explanation=msg)
        return output['body']

    @classmethod
    def _check_readonly(cls, image):
        for key in cls._readonly_properties:
            if key in image:
                msg = "Attribute \'%s\' is read-only." % key
                raise webob.exc.HTTPForbidden(explanation=unicode(msg))

    @classmethod
    def _check_reserved(cls, image):
        for key in cls._reserved_properties:
            if key in image:
                msg = "Attribute \'%s\' is reserved." % key
                raise webob.exc.HTTPForbidden(explanation=unicode(msg))

    def create(self, request):
        return self._parse_image(request)

    def _get_change_operation(self, raw_change):
        op = None
        for key in ['replace', 'add', 'remove']:
            if key in raw_change:
                if op is not None:
                    msg = _('Operation objects must contain only one member'
                            ' named "add", "remove", or "replace".')
                    raise webob.exc.HTTPBadRequest(explanation=msg)
                op = key
        if op is None:
            msg = _('Operation objects must contain exactly one member'
                    ' named "add", "remove", or "replace".')
            raise webob.exc.HTTPBadRequest(explanation=msg)
        return op

    def _get_change_path(self, raw_change, op):
        key = self._decode_json_pointer(raw_change[op])
        if key in self._readonly_properties:
            msg = "Attribute \'%s\' is read-only." % key
            raise webob.exc.HTTPForbidden(explanation=unicode(msg))
        if key in self._reserved_properties:
            msg = "Attribute \'%s\' is reserved." % key
            raise webob.exc.HTTPForbidden(explanation=unicode(msg))

        # For image properties, we need to put "properties" at the beginning
        if key not in self._base_properties:
            return ['properties', key]

        return [key]

    def _decode_json_pointer(self, pointer):
        """Parse a json pointer.

        Json Pointers are defined in
        http://tools.ietf.org/html/draft-pbryan-zyp-json-pointer .
        The pointers use '/' for separation between object attributes, such
        that '/A/B' would evaluate to C in {"A": {"B": "C"}}. A '/' character
        in an attribute name is encoded as "~1" and a '~' character is
        encoded as "~0".
        """
        self._validate_json_pointer(pointer)
        return pointer.lstrip('/').replace('~1', '/').replace('~0', '~')

    def _validate_json_pointer(self, pointer):
        """Validate a json pointer.

        We only accept a limited form of json pointers. Specifically, we do
        not allow multiple levels of indirection, so there can only be one
        '/' in the pointer, located at the start of the string.
        """
        if not pointer.startswith('/'):
            msg = _('Pointer `%s` does not start with "/".' % pointer)
            raise webob.exc.HTTPBadRequest(explanation=msg)
        if '/' in pointer[1:]:
            msg = _('Pointer `%s` contains more than one "/".' % pointer)
            raise webob.exc.HTTPBadRequest(explanation=msg)
        if re.match('~[^01]', pointer):
            msg = _('Pointer `%s` contains "~" not part of'
                    ' a recognized escape sequence.' % pointer)
            raise webob.exc.HTTPBadRequest(explanation=msg)

    def _get_change_value(self, raw_change, op):
        if 'value' not in raw_change:
            msg = _('Operation "%s" requires a member named "value".')
            raise webob.exc.HTTPBadRequest(explanation=msg % op)
        return raw_change['value']

    def _validate_change(self, change):
        if change['op'] == 'delete':
            return
        partial_image = {change['path'][-1]: change['value']}
        try:
            self.schema.validate(partial_image)
        except exception.InvalidObject as e:
            raise webob.exc.HTTPBadRequest(explanation=unicode(e))

    def update(self, request):
        changes = []
        valid_content_types = [
            'application/openstack-images-v2.0-json-patch'
        ]
        if request.content_type not in valid_content_types:
            headers = {'Accept-Patch': ','.join(valid_content_types)}
            raise webob.exc.HTTPUnsupportedMediaType(headers=headers)

        body = self._get_request_body(request)

        if not isinstance(body, list):
            msg = _('Request body must be a JSON array of operation objects.')
            raise webob.exc.HTTPBadRequest(explanation=msg)

        for raw_change in body:
            if not isinstance(raw_change, dict):
                msg = _('Operations must be JSON objects.')
                raise webob.exc.HTTPBadRequest(explanation=msg)

            op = self._get_change_operation(raw_change)
            path = self._get_change_path(raw_change, op)

            change = {'op': op, 'path': path}
            if not op == 'remove':
                change['value'] = self._get_change_value(raw_change, op)
                self._validate_change(change)

            if change['path'] == ['visibility']:
                change['path'] = ['is_public']
                change['value'] = change['value'] == 'public'

            changes.append(change)

        return {'changes': changes}

    def _validate_limit(self, limit):
        try:
            limit = int(limit)
        except ValueError:
            msg = _("limit param must be an integer")
            raise webob.exc.HTTPBadRequest(explanation=msg)

        if limit < 0:
            msg = _("limit param must be positive")
            raise webob.exc.HTTPBadRequest(explanation=msg)

        return limit

    def _validate_sort_dir(self, sort_dir):
        if sort_dir not in ['asc', 'desc']:
            msg = _('Invalid sort direction: %s' % sort_dir)
            raise webob.exc.HTTPBadRequest(explanation=msg)

        return sort_dir

    def _get_filters(self, filters):
        visibility = filters.pop('visibility', None)
        if visibility:
            if visibility in ['public', 'private']:
                filters['is_public'] = visibility == 'public'
            else:
                msg = _('Invalid visibility value: %s') % visibility
                raise webob.exc.HTTPBadRequest(explanation=msg)

        return filters

    def index(self, request):
        params = request.params.copy()
        limit = params.pop('limit', None)
        marker = params.pop('marker', None)
        sort_dir = params.pop('sort_dir', 'desc')
        query_params = {
            'sort_key': params.pop('sort_key', 'created_at'),
            'sort_dir': self._validate_sort_dir(sort_dir),
            'filters': self._get_filters(params),
        }

        if marker is not None:
            query_params['marker'] = marker

        if limit is not None:
            query_params['limit'] = self._validate_limit(limit)

        return query_params


class ResponseSerializer(wsgi.JSONResponseSerializer):
    def __init__(self, schema=None):
        super(ResponseSerializer, self).__init__()
        self.schema = schema or get_schema()

    def _get_image_href(self, image, subcollection=''):
        base_href = '/v2/images/%s' % image['id']

        if subcollection:
            base_href = '%s/%s' % (base_href, subcollection)

        return base_href

    def _get_image_links(self, image):
        return [
            {'rel': 'self', 'href': self._get_image_href(image)},
            {'rel': 'file', 'href': self._get_image_href(image, 'file')},
            {'rel': 'describedby', 'href': '/v2/schemas/image'},
        ]

    def _format_image(self, image):
        #NOTE(bcwaldon): merge the contained properties dict with the
        # top-level image object
        image_view = image['properties']

        attributes = ['id', 'name', 'disk_format', 'container_format',
                      'size', 'status', 'checksum', 'tags', 'protected',
                      'created_at', 'updated_at', 'min_ram', 'min_disk']
        for key in attributes:
            image_view[key] = image[key]

        location = image['location']
        if CONF.show_image_direct_url and location is not None:
            image_view['direct_url'] = location

        visibility = 'public' if image['is_public'] else 'private'
        image_view['visibility'] = visibility

        image_view['self'] = self._get_image_href(image)
        image_view['file'] = self._get_image_href(image, 'file')
        image_view['schema'] = '/v2/schemas/image'

        self._serialize_datetimes(image_view)
        image_view = self.schema.filter(image_view)

        return image_view

    @staticmethod
    def _serialize_datetimes(image):
        for (key, value) in image.iteritems():
            if isinstance(value, datetime.datetime):
                image[key] = timeutils.isotime(value)

    def create(self, response, image):
        response.status_int = 201
        body = json.dumps(self._format_image(image), ensure_ascii=False)
        response.unicode_body = unicode(body)
        response.content_type = 'application/json'
        response.location = self._get_image_href(image)

    def show(self, response, image):
        body = json.dumps(self._format_image(image), ensure_ascii=False)
        response.unicode_body = unicode(body)
        response.content_type = 'application/json'

    def update(self, response, image):
        body = json.dumps(self._format_image(image), ensure_ascii=False)
        response.unicode_body = unicode(body)
        response.content_type = 'application/json'

    def index(self, response, result):
        params = dict(response.request.params)
        params.pop('marker', None)
        query = urllib.urlencode(params)
        body = {
            'images': [self._format_image(i) for i in result['images']],
            'first': '/v2/images',
            'schema': '/v2/schemas/images',
        }
        if query:
            body['first'] = '%s?%s' % (body['first'], query)
        if 'next_marker' in result:
            params['marker'] = result['next_marker']
            next_query = urllib.urlencode(params)
            body['next'] = '/v2/images?%s' % next_query
        response.unicode_body = unicode(json.dumps(body, ensure_ascii=False))
        response.content_type = 'application/json'

    def delete(self, response, result):
        response.status_int = 204


_BASE_PROPERTIES = {
    'id': {
        'type': 'string',
        'description': 'An identifier for the image',
        'pattern': ('^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}'
                    '-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$'),
    },
    'name': {
        'type': 'string',
        'description': 'Descriptive name for the image',
        'maxLength': 255,
    },
    'status': {
        'type': 'string',
        'description': 'Status of the image',
        'enum': ['queued', 'saving', 'active', 'killed',
                 'deleted', 'pending_delete'],
    },
    'visibility': {
        'type': 'string',
        'description': 'Scope of image accessibility',
        'enum': ['public', 'private'],
    },
    'protected': {
        'type': 'boolean',
        'description': 'If true, image will not be deletable.',
    },
    'checksum': {
        'type': 'string',
        'description': 'md5 hash of image contents.',
        'maxLength': 32,
    },
    'size': {
        'type': 'integer',
        'description': 'Size of image file in bytes',
    },
    'container_format': {
        'type': 'string',
        'description': '',
        'enum': ['bare', 'ovf', 'ami', 'aki', 'ari'],
    },
    'disk_format': {
        'type': 'string',
        'description': '',
        'enum': ['raw', 'vhd', 'vmdk', 'vdi', 'iso', 'qcow2',
                 'aki', 'ari', 'ami'],
    },
    'created_at': {
        'type': 'string',
        'description': 'Date and time of image registration',
        #TODO(bcwaldon): our jsonschema library doesn't seem to like the
        # format attribute, figure out why!
        #'format': 'date-time',
    },
    'updated_at': {
        'type': 'string',
        'description': 'Date and time of the last image modification',
        #'format': 'date-time',
    },
    'tags': {
        'type': 'array',
        'description': 'List of strings related to the image',
        'items': {
            'type': 'string',
            'maxLength': 255,
        },
    },
    'direct_url': {
        'type': 'string',
        'description': 'URL to access the image file kept in external store',
    },
    'min_ram': {
        'type': 'integer',
        'description': 'Amount of ram (in MB) required to boot image.',
    },
    'min_disk': {
        'type': 'integer',
        'description': 'Amount of disk space (in GB) required to boot image.',
    },
    'self': {'type': 'string'},
    'file': {'type': 'string'},
    'schema': {'type': 'string'},
}

_BASE_LINKS = [
    {'rel': 'self', 'href': '{self}'},
    {'rel': 'enclosure', 'href': '{file}'},
    {'rel': 'describedby', 'href': '{schema}'},
]


def get_schema(custom_properties=None):
    properties = copy.deepcopy(_BASE_PROPERTIES)
    links = copy.deepcopy(_BASE_LINKS)
    if CONF.allow_additional_image_properties:
        schema = glance.schema.PermissiveSchema('image', properties, links)
    else:
        schema = glance.schema.Schema('image', properties)
    schema.merge_properties(custom_properties or {})
    return schema


def get_collection_schema(custom_properties=None):
    image_schema = get_schema(custom_properties)
    return glance.schema.CollectionSchema('images', image_schema)


def load_custom_properties():
    """Find the schema properties files and load them into a dict."""
    filename = 'schema-image.json'
    match = CONF.find_file(filename)
    if match:
        schema_file = open(match)
        schema_data = schema_file.read()
        return json.loads(schema_data)
    else:
        msg = _('Could not find schema properties file %s. Continuing '
                'without custom properties')
        LOG.warn(msg % filename)
        return {}


def create_resource(custom_properties=None):
    """Images resource factory method"""
    schema = get_schema(custom_properties)
    deserializer = RequestDeserializer(schema)
    serializer = ResponseSerializer(schema)
    controller = ImagesController()
    return wsgi.Resource(controller, deserializer, serializer)
./CrossVul/dataset_final_sorted/CWE-264/py/bad_3784_0
crossvul-python_data_good_3725_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Main entry point into the Identity service.""" import urllib import urlparse import uuid from keystone.common import logging from keystone.common import manager from keystone.common import wsgi from keystone import config from keystone import exception from keystone import policy from keystone import token CONF = config.CONF LOG = logging.getLogger(__name__) class Manager(manager.Manager): """Default pivot point for the Identity backend. See :mod:`keystone.common.manager.Manager` for more details on how this dynamically calls the backend. """ def __init__(self): super(Manager, self).__init__(CONF.identity.driver) class Driver(object): """Interface description for an Identity driver.""" def authenticate(self, user_id=None, tenant_id=None, password=None): """Authenticate a given user, tenant and password. :returns: (user_ref, tenant_ref, metadata_ref) :raises: AssertionError """ raise exception.NotImplemented() def get_tenant(self, tenant_id): """Get a tenant by id. :returns: tenant_ref :raises: keystone.exception.TenantNotFound """ raise exception.NotImplemented() def get_tenant_by_name(self, tenant_name): """Get a tenant by name. :returns: tenant_ref :raises: keystone.exception.TenantNotFound """ raise exception.NotImplemented() def get_user(self, user_id): """Get a user by id. :returns: user_ref :raises: keystone.exception.UserNotFound """ raise exception.NotImplemented() def get_user_by_name(self, user_name): """Get a user by name. :returns: user_ref :raises: keystone.exception.UserNotFound """ raise exception.NotImplemented() def get_role(self, role_id): """Get a role by id. :returns: role_ref :raises: keystone.exception.RoleNotFound """ raise exception.NotImplemented() def list_users(self): """List all users in the system. NOTE(termie): I'd prefer if this listed only the users for a given tenant. :returns: a list of user_refs or an empty list """ raise exception.NotImplemented() def list_roles(self): """List all roles in the system. :returns: a list of role_refs or an empty list. """ raise exception.NotImplemented() # NOTE(termie): seven calls below should probably be exposed by the api # more clearly when the api redesign happens def add_user_to_tenant(self, tenant_id, user_id): """Add user to a tenant without an explicit role relationship. :raises: keystone.exception.TenantNotFound, keystone.exception.UserNotFound """ raise exception.NotImplemented() def remove_user_from_tenant(self, tenant_id, user_id): """Remove user from a tenant without an explicit role relationship. :raises: keystone.exception.TenantNotFound, keystone.exception.UserNotFound """ raise exception.NotImplemented() def get_all_tenants(self): """FIXME(dolph): Lists all tenants in the system? I'm not sure how this is different from get_tenants, why get_tenants isn't documented as part of the driver, or why it's called get_tenants instead of list_tenants (i.e. list_roles and list_users)... 
:returns: a list of ... FIXME(dolph): tenant_refs or tenant_id's? """ raise exception.NotImplemented() def get_tenant_users(self, tenant_id): """FIXME(dolph): Lists all users with a relationship to the specified tenant? :returns: a list of ... FIXME(dolph): user_refs or user_id's? :raises: keystone.exception.UserNotFound """ raise exception.NotImplemented() def get_tenants_for_user(self, user_id): """Get the tenants associated with a given user. :returns: a list of tenant_id's. :raises: keystone.exception.UserNotFound """ raise exception.NotImplemented() def get_roles_for_user_and_tenant(self, user_id, tenant_id): """Get the roles associated with a user within given tenant. :returns: a list of role ids. :raises: keystone.exception.UserNotFound, keystone.exception.TenantNotFound """ raise exception.NotImplemented() def add_role_to_user_and_tenant(self, user_id, tenant_id, role_id): """Add a role to a user within given tenant. :raises: keystone.exception.UserNotFound, keystone.exception.TenantNotFound, keystone.exception.RoleNotFound """ raise exception.NotImplemented() def remove_role_from_user_and_tenant(self, user_id, tenant_id, role_id): """Remove a role from a user within given tenant. :raises: keystone.exception.UserNotFound, keystone.exception.TenantNotFound, keystone.exception.RoleNotFound """ raise exception.NotImplemented() # user crud def create_user(self, user_id, user): """Creates a new user. :raises: keystone.exception.Conflict """ raise exception.NotImplemented() def update_user(self, user_id, user): """Updates an existing user. :raises: keystone.exception.UserNotFound, keystone.exception.Conflict """ raise exception.NotImplemented() def delete_user(self, user_id): """Deletes an existing user. :raises: keystone.exception.UserNotFound """ raise exception.NotImplemented() # tenant crud def create_tenant(self, tenant_id, tenant): """Creates a new tenant. :raises: keystone.exception.Conflict """ raise exception.NotImplemented() def update_tenant(self, tenant_id, tenant): """Updates an existing tenant. :raises: keystone.exception.TenantNotFound, keystone.exception.Conflict """ raise exception.NotImplemented() def delete_tenant(self, tenant_id): """Deletes an existing tenant. :raises: keystone.exception.TenantNotFound """ raise exception.NotImplemented() # metadata crud def get_metadata(self, user_id, tenant_id): raise exception.NotImplemented() def create_metadata(self, user_id, tenant_id, metadata): raise exception.NotImplemented() def update_metadata(self, user_id, tenant_id, metadata): raise exception.NotImplemented() def delete_metadata(self, user_id, tenant_id): raise exception.NotImplemented() # role crud def create_role(self, role_id, role): """Creates a new role. :raises: keystone.exception.Conflict """ raise exception.NotImplemented() def update_role(self, role_id, role): """Updates an existing role. :raises: keystone.exception.RoleNotFound, keystone.exception.Conflict """ raise exception.NotImplemented() def delete_role(self, role_id): """Deletes an existing role. 
:raises: keystone.exception.RoleNotFound """ raise exception.NotImplemented() class PublicRouter(wsgi.ComposableRouter): def add_routes(self, mapper): tenant_controller = TenantController() mapper.connect('/tenants', controller=tenant_controller, action='get_tenants_for_token', conditions=dict(method=['GET'])) class AdminRouter(wsgi.ComposableRouter): def add_routes(self, mapper): # Tenant Operations tenant_controller = TenantController() mapper.connect('/tenants', controller=tenant_controller, action='get_all_tenants', conditions=dict(method=['GET'])) mapper.connect('/tenants/{tenant_id}', controller=tenant_controller, action='get_tenant', conditions=dict(method=['GET'])) # User Operations user_controller = UserController() mapper.connect('/users/{user_id}', controller=user_controller, action='get_user', conditions=dict(method=['GET'])) # Role Operations roles_controller = RoleController() mapper.connect('/tenants/{tenant_id}/users/{user_id}/roles', controller=roles_controller, action='get_user_roles', conditions=dict(method=['GET'])) mapper.connect('/users/{user_id}/roles', controller=roles_controller, action='get_user_roles', conditions=dict(method=['GET'])) class TenantController(wsgi.Application): def __init__(self): self.identity_api = Manager() self.policy_api = policy.Manager() self.token_api = token.Manager() super(TenantController, self).__init__() def get_all_tenants(self, context, **kw): """Gets a list of all tenants for an admin user.""" self.assert_admin(context) tenant_refs = self.identity_api.get_tenants(context) params = { 'limit': context['query_string'].get('limit'), 'marker': context['query_string'].get('marker'), } return self._format_tenant_list(tenant_refs, **params) def get_tenants_for_token(self, context, **kw): """Get valid tenants for token based on token used to authenticate. Pulls the token from the context, validates it and gets the valid tenants for the user in the token. Doesn't care about token scopedness. 
""" try: token_ref = self.token_api.get_token(context=context, token_id=context['token_id']) except exception.NotFound: raise exception.Unauthorized() user_ref = token_ref['user'] tenant_ids = self.identity_api.get_tenants_for_user( context, user_ref['id']) tenant_refs = [] for tenant_id in tenant_ids: tenant_refs.append(self.identity_api.get_tenant( context=context, tenant_id=tenant_id)) params = { 'limit': context['query_string'].get('limit'), 'marker': context['query_string'].get('marker'), } return self._format_tenant_list(tenant_refs, **params) def get_tenant(self, context, tenant_id): # TODO(termie): this stuff should probably be moved to middleware self.assert_admin(context) return {'tenant': self.identity_api.get_tenant(context, tenant_id)} # CRUD Extension def create_tenant(self, context, tenant): tenant_ref = self._normalize_dict(tenant) if not 'name' in tenant_ref or not tenant_ref['name']: msg = 'Name field is required and cannot be empty' raise exception.ValidationError(message=msg) self.assert_admin(context) tenant_ref['id'] = tenant_ref.get('id', uuid.uuid4().hex) tenant = self.identity_api.create_tenant( context, tenant_ref['id'], tenant_ref) return {'tenant': tenant} def update_tenant(self, context, tenant_id, tenant): self.assert_admin(context) tenant_ref = self.identity_api.update_tenant( context, tenant_id, tenant) return {'tenant': tenant_ref} def delete_tenant(self, context, tenant_id): self.assert_admin(context) self.identity_api.delete_tenant(context, tenant_id) def get_tenant_users(self, context, tenant_id, **kw): self.assert_admin(context) user_refs = self.identity_api.get_tenant_users(context, tenant_id) return {'users': user_refs} def _format_tenant_list(self, tenant_refs, **kwargs): marker = kwargs.get('marker') first_index = 0 if marker is not None: for (marker_index, tenant) in enumerate(tenant_refs): if tenant['id'] == marker: # we start pagination after the marker first_index = marker_index + 1 break else: msg = 'Marker could not be found' raise exception.ValidationError(message=msg) limit = kwargs.get('limit') last_index = None if limit is not None: try: limit = int(limit) if limit < 0: raise AssertionError() except (ValueError, AssertionError): msg = 'Invalid limit value' raise exception.ValidationError(message=msg) last_index = first_index + limit tenant_refs = tenant_refs[first_index:last_index] for x in tenant_refs: if 'enabled' not in x: x['enabled'] = True o = {'tenants': tenant_refs, 'tenants_links': []} return o class UserController(wsgi.Application): def __init__(self): self.identity_api = Manager() self.policy_api = policy.Manager() self.token_api = token.Manager() super(UserController, self).__init__() def get_user(self, context, user_id): self.assert_admin(context) return {'user': self.identity_api.get_user(context, user_id)} def get_users(self, context): # NOTE(termie): i can't imagine that this really wants all the data # about every single user in the system... 
        self.assert_admin(context)
        return {'users': self.identity_api.list_users(context)}

    # CRUD extension
    def create_user(self, context, user):
        user = self._normalize_dict(user)
        self.assert_admin(context)

        if 'name' not in user or not user['name']:
            msg = 'Name field is required and cannot be empty'
            raise exception.ValidationError(message=msg)

        tenant_id = user.get('tenantId', None)
        if (tenant_id is not None
                and self.identity_api.get_tenant(context, tenant_id) is None):
            raise exception.TenantNotFound(tenant_id=tenant_id)
        user_id = uuid.uuid4().hex
        user_ref = user.copy()
        user_ref['id'] = user_id
        new_user_ref = self.identity_api.create_user(
            context, user_id, user_ref)
        if tenant_id:
            self.identity_api.add_user_to_tenant(context, tenant_id, user_id)
        return {'user': new_user_ref}

    def update_user(self, context, user_id, user):
        # NOTE(termie): this is really more of a patch than a put
        self.assert_admin(context)
        user_ref = self.identity_api.update_user(context, user_id, user)

        # If the password was changed or the user was disabled we clear tokens
        if user.get('password') or not user.get('enabled', True):
            try:
                for token_id in self.token_api.list_tokens(context, user_id):
                    self.token_api.delete_token(context, token_id)
            except exception.NotImplemented:
                # The user's status has been changed, but tokens remain valid
                # for backends that can't list tokens for users
                LOG.warning('User %s status has changed, but existing tokens '
                            'remain valid' % user_id)
        return {'user': user_ref}

    def delete_user(self, context, user_id):
        self.assert_admin(context)
        self.identity_api.delete_user(context, user_id)

    def set_user_enabled(self, context, user_id, user):
        return self.update_user(context, user_id, user)

    def set_user_password(self, context, user_id, user):
        return self.update_user(context, user_id, user)

    def update_user_tenant(self, context, user_id, user):
        """Update the default tenant."""
        self.assert_admin(context)
        # ensure that we're a member of that tenant
        tenant_id = user.get('tenantId')
        self.identity_api.add_user_to_tenant(context, tenant_id, user_id)
        return self.update_user(context, user_id, user)


class RoleController(wsgi.Application):
    def __init__(self):
        self.identity_api = Manager()
        self.token_api = token.Manager()
        self.policy_api = policy.Manager()
        super(RoleController, self).__init__()

    # COMPAT(essex-3)
    def get_user_roles(self, context, user_id, tenant_id=None):
        """Get the roles for a user and tenant pair.

        Since we're trying to ignore the idea of user-only roles we're
        not implementing them in hopes that the idea will die off.
""" self.assert_admin(context) if tenant_id is None: raise exception.NotImplemented(message='User roles not supported: ' 'tenant ID required') roles = self.identity_api.get_roles_for_user_and_tenant( context, user_id, tenant_id) return {'roles': [self.identity_api.get_role(context, x) for x in roles]} # CRUD extension def get_role(self, context, role_id): self.assert_admin(context) return {'role': self.identity_api.get_role(context, role_id)} def create_role(self, context, role): role = self._normalize_dict(role) self.assert_admin(context) if not 'name' in role or not role['name']: msg = 'Name field is required and cannot be empty' raise exception.ValidationError(message=msg) role_id = uuid.uuid4().hex role['id'] = role_id role_ref = self.identity_api.create_role(context, role_id, role) return {'role': role_ref} def delete_role(self, context, role_id): self.assert_admin(context) self.identity_api.delete_role(context, role_id) def get_roles(self, context): self.assert_admin(context) return {'roles': self.identity_api.list_roles(context)} def add_role_to_user(self, context, user_id, role_id, tenant_id=None): """Add a role to a user and tenant pair. Since we're trying to ignore the idea of user-only roles we're not implementing them in hopes that the idea will die off. """ self.assert_admin(context) if tenant_id is None: raise exception.NotImplemented(message='User roles not supported: ' 'tenant_id required') # This still has the weird legacy semantics that adding a role to # a user also adds them to a tenant self.identity_api.add_user_to_tenant(context, tenant_id, user_id) self.identity_api.add_role_to_user_and_tenant( context, user_id, tenant_id, role_id) role_ref = self.identity_api.get_role(context, role_id) return {'role': role_ref} def remove_role_from_user(self, context, user_id, role_id, tenant_id=None): """Remove a role from a user and tenant pair. Since we're trying to ignore the idea of user-only roles we're not implementing them in hopes that the idea will die off. """ self.assert_admin(context) if tenant_id is None: raise exception.NotImplemented(message='User roles not supported: ' 'tenant_id required') # This still has the weird legacy semantics that adding a role to # a user also adds them to a tenant, so we must follow up on that self.identity_api.remove_role_from_user_and_tenant( context, user_id, tenant_id, role_id) roles = self.identity_api.get_roles_for_user_and_tenant( context, user_id, tenant_id) if not roles: self.identity_api.remove_user_from_tenant( context, tenant_id, user_id) return # COMPAT(diablo): CRUD extension def get_role_refs(self, context, user_id): """Ultimate hack to get around having to make role_refs first-class. This will basically iterate over the various roles the user has in all tenants the user is a member of and create fake role_refs where the id encodes the user-tenant-role information so we can look up the appropriate data when we need to delete them. """ self.assert_admin(context) # Ensure user exists by getting it first. 
self.identity_api.get_user(context, user_id) tenant_ids = self.identity_api.get_tenants_for_user(context, user_id) o = [] for tenant_id in tenant_ids: role_ids = self.identity_api.get_roles_for_user_and_tenant( context, user_id, tenant_id) for role_id in role_ids: ref = {'roleId': role_id, 'tenantId': tenant_id, 'userId': user_id} ref['id'] = urllib.urlencode(ref) o.append(ref) return {'roles': o} # COMPAT(diablo): CRUD extension def create_role_ref(self, context, user_id, role): """This is actually used for adding a user to a tenant. In the legacy data model adding a user to a tenant required setting a role. """ self.assert_admin(context) # TODO(termie): for now we're ignoring the actual role tenant_id = role.get('tenantId') role_id = role.get('roleId') self.identity_api.add_user_to_tenant(context, tenant_id, user_id) self.identity_api.add_role_to_user_and_tenant( context, user_id, tenant_id, role_id) role_ref = self.identity_api.get_role(context, role_id) return {'role': role_ref} # COMPAT(diablo): CRUD extension def delete_role_ref(self, context, user_id, role_ref_id): """This is actually used for deleting a user from a tenant. In the legacy data model removing a user from a tenant required deleting a role. To emulate this, we encode the tenant and role in the role_ref_id, and if this happens to be the last role for the user-tenant pair, we remove the user from the tenant. """ self.assert_admin(context) # TODO(termie): for now we're ignoring the actual role role_ref_ref = urlparse.parse_qs(role_ref_id) tenant_id = role_ref_ref.get('tenantId')[0] role_id = role_ref_ref.get('roleId')[0] self.identity_api.remove_role_from_user_and_tenant( context, user_id, tenant_id, role_id) roles = self.identity_api.get_roles_for_user_and_tenant( context, user_id, tenant_id) if not roles: self.identity_api.remove_user_from_tenant( context, tenant_id, user_id)
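# Illustrative sketch (not part of the original module): the fake role_ref
# ids minted by get_role_refs() above are just urlencoded
# (roleId, tenantId, userId) triples, so delete_role_ref() can recover the
# tenant/role pair with urlparse.parse_qs. The ids below are made up; urllib
# and urlparse are already imported at the top of this module.
def _example_role_ref_roundtrip():
    ref = {'roleId': 'r1', 'tenantId': 't1', 'userId': 'u1'}
    ref_id = urllib.urlencode(ref)
    decoded = urlparse.parse_qs(ref_id)
    assert decoded['tenantId'][0] == 't1'
    assert decoded['roleId'][0] == 'r1'
    return ref_id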
./CrossVul/dataset_final_sorted/CWE-264/py/good_3725_0
crossvul-python_data_good_3697_2
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # # Copyright 2011, Piston Cloud Computing, Inc. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Utility methods to resize, repartition, and modify disk images. Includes injection of SSH PGP keys into authorized_keys file. """ import crypt import os import random import tempfile from nova import exception from nova import flags from nova.openstack.common import cfg from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova import utils from nova.virt.disk import guestfs from nova.virt.disk import loop from nova.virt.disk import nbd from nova.virt import images LOG = logging.getLogger(__name__) disk_opts = [ cfg.StrOpt('injected_network_template', default='$pybasedir/nova/virt/interfaces.template', help='Template file for injected network'), cfg.ListOpt('img_handlers', default=['loop', 'nbd', 'guestfs'], help='Order of methods used to mount disk images'), # NOTE(yamahata): ListOpt won't work because the command may include a # comma. For example: # # mkfs.ext3 -O dir_index,extent -E stride=8,stripe-width=16 # --label %(fs_label)s %(target)s # # list arguments are comma separated and there is no way to # escape such commas. # cfg.MultiStrOpt('virt_mkfs', default=[ 'default=mkfs.ext3 -L %(fs_label)s -F %(target)s', 'linux=mkfs.ext3 -L %(fs_label)s -F %(target)s', 'windows=mkfs.ntfs' ' --force --fast --label %(fs_label)s %(target)s', # NOTE(yamahata): vfat case #'windows=mkfs.vfat -n %(fs_label)s %(target)s', ], help='mkfs commands for ephemeral device. ' 'The format is <os_type>=<mkfs command>'), ] FLAGS = flags.FLAGS FLAGS.register_opts(disk_opts) _MKFS_COMMAND = {} _DEFAULT_MKFS_COMMAND = None for s in FLAGS.virt_mkfs: # NOTE(yamahata): mkfs command may includes '=' for its options. # So item.partition('=') doesn't work here os_type, mkfs_command = s.split('=', 1) if os_type: _MKFS_COMMAND[os_type] = mkfs_command if os_type == 'default': _DEFAULT_MKFS_COMMAND = mkfs_command def mkfs(os_type, fs_label, target): mkfs_command = (_MKFS_COMMAND.get(os_type, _DEFAULT_MKFS_COMMAND) or '') % locals() if mkfs_command: utils.execute(*mkfs_command.split()) def resize2fs(image, check_exit_code=False): utils.execute('e2fsck', '-fp', image, check_exit_code=check_exit_code) utils.execute('resize2fs', image, check_exit_code=check_exit_code) def get_disk_size(path): """Get the (virtual) size of a disk image :param path: Path to the disk image :returns: Size (in bytes) of the given disk image as it would be seen by a virtual machine. 
""" size = images.qemu_img_info(path)['virtual size'] size = size.split('(')[1].split()[0] return int(size) def extend(image, size): """Increase image to size""" virt_size = get_disk_size(image) if virt_size >= size: return utils.execute('qemu-img', 'resize', image, size) # NOTE(vish): attempts to resize filesystem resize2fs(image) def can_resize_fs(image, size, use_cow=False): """Check whether we can resize contained file system.""" # Check that we're increasing the size virt_size = get_disk_size(image) if virt_size >= size: return False # Check the image is unpartitioned if use_cow: # Try to mount an unpartitioned qcow2 image try: inject_data(image, use_cow=True) except exception.NovaException: return False else: # For raw, we can directly inspect the file system try: utils.execute('e2label', image) except exception.ProcessExecutionError: return False return True def bind(src, target, instance_name): """Bind device to a filesytem""" if src: utils.execute('touch', target, run_as_root=True) utils.execute('mount', '-o', 'bind', src, target, run_as_root=True) s = os.stat(src) cgroup_info = "b %s:%s rwm\n" % (os.major(s.st_rdev), os.minor(s.st_rdev)) cgroups_path = ("/sys/fs/cgroup/devices/libvirt/lxc/" "%s/devices.allow" % instance_name) utils.execute('tee', cgroups_path, process_input=cgroup_info, run_as_root=True) def unbind(target): if target: utils.execute('umount', target, run_as_root=True) class _DiskImage(object): """Provide operations on a disk image file.""" tmp_prefix = 'openstack-disk-mount-tmp' def __init__(self, image, partition=None, use_cow=False, mount_dir=None): # These passed to each mounter self.image = image self.partition = partition self.mount_dir = mount_dir # Internal self._mkdir = False self._mounter = None self._errors = [] # As a performance tweak, don't bother trying to # directly loopback mount a cow image. self.handlers = FLAGS.img_handlers[:] if use_cow and 'loop' in self.handlers: self.handlers.remove('loop') if not self.handlers: msg = _('no capable image handler configured') raise exception.NovaException(msg) if mount_dir: # Note the os.path.ismount() shortcut doesn't # work with libguestfs due to permissions issues. device = self._device_for_path(mount_dir) if device: self._reset(device) @staticmethod def _device_for_path(path): device = None with open("/proc/mounts", 'r') as ifp: for line in ifp: fields = line.split() if fields[1] == path: device = fields[0] break return device def _reset(self, device): """Reset internal state for a previously mounted directory.""" mounter_cls = self._handler_class(device=device) mounter = mounter_cls(image=self.image, partition=self.partition, mount_dir=self.mount_dir, device=device) self._mounter = mounter mount_name = os.path.basename(self.mount_dir or '') self._mkdir = mount_name.startswith(self.tmp_prefix) @property def errors(self): """Return the collated errors from all operations.""" return '\n--\n'.join([''] + self._errors) @staticmethod def _handler_class(mode=None, device=None): """Look up the appropriate class to use based on MODE or DEVICE.""" for cls in (loop.Mount, nbd.Mount, guestfs.Mount): if mode and cls.mode == mode: return cls elif device and cls.device_id_string in device: return cls msg = _("no disk image handler for: %s") % mode or device raise exception.NovaException(msg) def mount(self): """Mount a disk image, using the object attributes. The first supported means provided by the mount classes is used. True, or False is returned and the 'errors' attribute contains any diagnostics. 
""" if self._mounter: raise exception.NovaException(_('image already mounted')) if not self.mount_dir: self.mount_dir = tempfile.mkdtemp(prefix=self.tmp_prefix) self._mkdir = True try: for h in self.handlers: mounter_cls = self._handler_class(h) mounter = mounter_cls(image=self.image, partition=self.partition, mount_dir=self.mount_dir) if mounter.do_mount(): self._mounter = mounter break else: LOG.debug(mounter.error) self._errors.append(mounter.error) finally: if not self._mounter: self.umount() # rmdir return bool(self._mounter) def umount(self): """Unmount a disk image from the file system.""" try: if self._mounter: self._mounter.do_umount() finally: if self._mkdir: os.rmdir(self.mount_dir) # Public module functions def inject_data(image, key=None, net=None, metadata=None, admin_password=None, files=None, partition=None, use_cow=False): """Injects a ssh key and optionally net data into a disk image. it will mount the image as a fully partitioned disk and attempt to inject into the specified partition number. If partition is not specified it mounts the image as a single partition. """ img = _DiskImage(image=image, partition=partition, use_cow=use_cow) if img.mount(): try: inject_data_into_fs(img.mount_dir, key, net, metadata, admin_password, files) finally: img.umount() else: raise exception.NovaException(img.errors) def setup_container(image, container_dir, use_cow=False): """Setup the LXC container. It will mount the loopback image to the container directory in order to create the root filesystem for the container. """ img = _DiskImage(image=image, use_cow=use_cow, mount_dir=container_dir) if not img.mount(): LOG.error(_("Failed to mount container filesystem '%(image)s' " "on '%(target)s': %(errors)s") % {"image": img, "target": container_dir, "errors": img.errors}) raise exception.NovaException(img.errors) def destroy_container(container_dir): """Destroy the container once it terminates. It will umount the container that is mounted, and delete any linked devices. """ try: img = _DiskImage(image=None, mount_dir=container_dir) img.umount() except Exception, exn: LOG.exception(_('Failed to unmount container filesystem: %s'), exn) def inject_data_into_fs(fs, key, net, metadata, admin_password, files): """Injects data into a filesystem already mounted by the caller. Virt connections can call this directly if they mount their fs in a different way to inject_data """ if key: _inject_key_into_fs(key, fs) if net: _inject_net_into_fs(net, fs) if metadata: _inject_metadata_into_fs(metadata, fs) if admin_password: _inject_admin_password_into_fs(admin_password, fs) if files: for (path, contents) in files: _inject_file_into_fs(fs, path, contents) def _join_and_check_path_within_fs(fs, *args): '''os.path.join() with safety check for injected file paths. Join the supplied path components and make sure that the resulting path we are injecting into is within the mounted guest fs. Trying to be clever and specifying a path with '..' in it will hit this safeguard. 
''' absolute_path, _err = utils.execute('readlink', '-nm', os.path.join(fs, *args), run_as_root=True) if not absolute_path.startswith(os.path.realpath(fs) + '/'): raise exception.Invalid(_('injected file path not valid')) return absolute_path def _inject_file_into_fs(fs, path, contents, append=False): absolute_path = _join_and_check_path_within_fs(fs, path.lstrip('/')) parent_dir = os.path.dirname(absolute_path) utils.execute('mkdir', '-p', parent_dir, run_as_root=True) args = [] if append: args.append('-a') args.append(absolute_path) kwargs = dict(process_input=contents, run_as_root=True) utils.execute('tee', *args, **kwargs) def _inject_metadata_into_fs(metadata, fs): metadata = dict([(m.key, m.value) for m in metadata]) _inject_file_into_fs(fs, 'meta.js', jsonutils.dumps(metadata)) def _setup_selinux_for_keys(fs): """Get selinux guests to ensure correct context on injected keys.""" se_cfg = _join_and_check_path_within_fs(fs, 'etc', 'selinux') se_cfg, _err = utils.trycmd('readlink', '-e', se_cfg, run_as_root=True) if not se_cfg: return rclocal = _join_and_check_path_within_fs(fs, 'etc', 'rc.local') # Support systemd based systems rc_d = _join_and_check_path_within_fs(fs, 'etc', 'rc.d') rclocal_e, _err = utils.trycmd('readlink', '-e', rclocal, run_as_root=True) rc_d_e, _err = utils.trycmd('readlink', '-e', rc_d, run_as_root=True) if not rclocal_e and rc_d_e: rclocal = os.path.join(rc_d, 'rc.local') # Note some systems end rc.local with "exit 0" # and so to append there you'd need something like: # utils.execute('sed', '-i', '${/^exit 0$/d}' rclocal, run_as_root=True) restorecon = [ '#!/bin/sh\n', '# Added by Nova to ensure injected ssh keys have the right context\n', 'restorecon -RF /root/.ssh/ 2>/dev/null || :\n', ] rclocal_rel = os.path.relpath(rclocal, fs) _inject_file_into_fs(fs, rclocal_rel, ''.join(restorecon), append=True) utils.execute('chmod', 'a+x', rclocal, run_as_root=True) def _inject_key_into_fs(key, fs): """Add the given public ssh key to root's authorized_keys. key is an ssh key string. fs is the path to the base of the filesystem into which to inject the key. """ sshdir = _join_and_check_path_within_fs(fs, 'root', '.ssh') utils.execute('mkdir', '-p', sshdir, run_as_root=True) utils.execute('chown', 'root', sshdir, run_as_root=True) utils.execute('chmod', '700', sshdir, run_as_root=True) keyfile = os.path.join('root', '.ssh', 'authorized_keys') key_data = ''.join([ '\n', '# The following ssh key was injected by Nova', '\n', key.strip(), '\n', ]) _inject_file_into_fs(fs, keyfile, key_data, append=True) _setup_selinux_for_keys(fs) def _inject_net_into_fs(net, fs): """Inject /etc/network/interfaces into the filesystem rooted at fs. net is the contents of /etc/network/interfaces. """ netdir = _join_and_check_path_within_fs(fs, 'etc', 'network') utils.execute('mkdir', '-p', netdir, run_as_root=True) utils.execute('chown', 'root:root', netdir, run_as_root=True) utils.execute('chmod', 755, netdir, run_as_root=True) netfile = os.path.join('etc', 'network', 'interfaces') _inject_file_into_fs(fs, netfile, net) def _inject_admin_password_into_fs(admin_passwd, fs): """Set the root password to admin_passwd admin_password is a root password fs is the path to the base of the filesystem into which to inject the key. This method modifies the instance filesystem directly, and does not require a guest agent running in the instance. 
""" # The approach used here is to copy the password and shadow # files from the instance filesystem to local files, make any # necessary changes, and then copy them back. admin_user = 'root' fd, tmp_passwd = tempfile.mkstemp() os.close(fd) fd, tmp_shadow = tempfile.mkstemp() os.close(fd) passwd_path = _join_and_check_path_within_fs(fs, 'etc', 'passwd') shadow_path = _join_and_check_path_within_fs(fs, 'etc', 'shadow') utils.execute('cp', passwd_path, tmp_passwd, run_as_root=True) utils.execute('cp', shadow_path, tmp_shadow, run_as_root=True) _set_passwd(admin_user, admin_passwd, tmp_passwd, tmp_shadow) utils.execute('cp', tmp_passwd, passwd_path, run_as_root=True) os.unlink(tmp_passwd) utils.execute('cp', tmp_shadow, shadow_path, run_as_root=True) os.unlink(tmp_shadow) def _set_passwd(username, admin_passwd, passwd_file, shadow_file): """set the password for username to admin_passwd The passwd_file is not modified. The shadow_file is updated. if the username is not found in both files, an exception is raised. :param username: the username :param encrypted_passwd: the encrypted password :param passwd_file: path to the passwd file :param shadow_file: path to the shadow password file :returns: nothing :raises: exception.NovaException(), IOError() """ salt_set = ('abcdefghijklmnopqrstuvwxyz' 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' '0123456789./') # encryption algo - id pairs for crypt() algos = {'SHA-512': '$6$', 'SHA-256': '$5$', 'MD5': '$1$', 'DES': ''} salt = 16 * ' ' salt = ''.join([random.choice(salt_set) for c in salt]) # crypt() depends on the underlying libc, and may not support all # forms of hash. We try md5 first. If we get only 13 characters back, # then the underlying crypt() didn't understand the '$n$salt' magic, # so we fall back to DES. # md5 is the default because it's widely supported. Although the # local crypt() might support stronger SHA, the target instance # might not. encrypted_passwd = crypt.crypt(admin_passwd, algos['MD5'] + salt) if len(encrypted_passwd) == 13: encrypted_passwd = crypt.crypt(admin_passwd, algos['DES'] + salt) try: p_file = open(passwd_file, 'rb') s_file = open(shadow_file, 'rb') # username MUST exist in passwd file or it's an error found = False for entry in p_file: split_entry = entry.split(':') if split_entry[0] == username: found = True break if not found: msg = _('User %(username)s not found in password file.') raise exception.NovaException(msg % username) # update password in the shadow file.It's an error if the # the user doesn't exist. new_shadow = list() found = False for entry in s_file: split_entry = entry.split(':') if split_entry[0] == username: split_entry[1] = encrypted_passwd found = True new_entry = ':'.join(split_entry) new_shadow.append(new_entry) s_file.close() if not found: msg = _('User %(username)s not found in shadow file.') raise exception.NovaException(msg % username) s_file = open(shadow_file, 'wb') for entry in new_shadow: s_file.write(entry) finally: p_file.close() s_file.close()
./CrossVul/dataset_final_sorted/CWE-264/py/good_3697_2
crossvul-python_data_good_3633_4
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Implementation of SQLAlchemy backend. """ import re import warnings from nova import block_device from nova import db from nova import exception from nova import flags from nova import ipv6 from nova import utils from nova import log as logging from nova.compute import vm_states from nova.db.sqlalchemy import models from nova.db.sqlalchemy.session import get_session from sqlalchemy import or_ from sqlalchemy.exc import IntegrityError from sqlalchemy.orm import joinedload from sqlalchemy.orm import joinedload_all from sqlalchemy.sql import func from sqlalchemy.sql.expression import desc from sqlalchemy.sql.expression import literal_column FLAGS = flags.FLAGS LOG = logging.getLogger("nova.db.sqlalchemy") def is_admin_context(context): """Indicates if the request context is an administrator.""" if not context: warnings.warn(_('Use of empty request context is deprecated'), DeprecationWarning) raise Exception('die') return context.is_admin def is_user_context(context): """Indicates if the request context is a normal user.""" if not context: return False if context.is_admin: return False if not context.user_id or not context.project_id: return False return True def authorize_project_context(context, project_id): """Ensures a request has permission to access the given project.""" if is_user_context(context): if not context.project_id: raise exception.NotAuthorized() elif context.project_id != project_id: raise exception.NotAuthorized() def authorize_user_context(context, user_id): """Ensures a request has permission to access the given user.""" if is_user_context(context): if not context.user_id: raise exception.NotAuthorized() elif context.user_id != user_id: raise exception.NotAuthorized() def can_read_deleted(context): """Indicates if the context has access to deleted objects.""" if not context: return False return context.read_deleted def require_admin_context(f): """Decorator to require admin request context. The first argument to the wrapped function must be the context. """ def wrapper(*args, **kwargs): if not is_admin_context(args[0]): raise exception.AdminRequired() return f(*args, **kwargs) return wrapper def require_context(f): """Decorator to require *any* user or admin context. This does no authorization for user or project access matching, see :py:func:`authorize_project_context` and :py:func:`authorize_user_context`. The first argument to the wrapped function must be the context. """ def wrapper(*args, **kwargs): if not is_admin_context(args[0]) and not is_user_context(args[0]): raise exception.NotAuthorized() return f(*args, **kwargs) return wrapper def require_instance_exists(f): """Decorator to require the specified instance to exist. Requres the wrapped function to use context and instance_id as their first two arguments. 
""" def wrapper(context, instance_id, *args, **kwargs): db.api.instance_get(context, instance_id) return f(context, instance_id, *args, **kwargs) wrapper.__name__ = f.__name__ return wrapper def require_volume_exists(f): """Decorator to require the specified volume to exist. Requres the wrapped function to use context and volume_id as their first two arguments. """ def wrapper(context, volume_id, *args, **kwargs): db.api.volume_get(context, volume_id) return f(context, volume_id, *args, **kwargs) wrapper.__name__ = f.__name__ return wrapper ################### @require_admin_context def service_destroy(context, service_id): session = get_session() with session.begin(): service_ref = service_get(context, service_id, session=session) service_ref.delete(session=session) if service_ref.topic == 'compute' and \ len(service_ref.compute_node) != 0: for c in service_ref.compute_node: c.delete(session=session) @require_admin_context def service_get(context, service_id, session=None): if not session: session = get_session() result = session.query(models.Service).\ options(joinedload('compute_node')).\ filter_by(id=service_id).\ filter_by(deleted=can_read_deleted(context)).\ first() if not result: raise exception.ServiceNotFound(service_id=service_id) return result @require_admin_context def service_get_all(context, disabled=None): session = get_session() query = session.query(models.Service).\ filter_by(deleted=can_read_deleted(context)) if disabled is not None: query = query.filter_by(disabled=disabled) return query.all() @require_admin_context def service_get_all_by_topic(context, topic): session = get_session() return session.query(models.Service).\ filter_by(deleted=False).\ filter_by(disabled=False).\ filter_by(topic=topic).\ all() @require_admin_context def service_get_by_host_and_topic(context, host, topic): session = get_session() return session.query(models.Service).\ filter_by(deleted=False).\ filter_by(disabled=False).\ filter_by(host=host).\ filter_by(topic=topic).\ first() @require_admin_context def service_get_all_by_host(context, host): session = get_session() return session.query(models.Service).\ filter_by(deleted=False).\ filter_by(host=host).\ all() @require_admin_context def service_get_all_compute_by_host(context, host): topic = 'compute' session = get_session() result = session.query(models.Service).\ options(joinedload('compute_node')).\ filter_by(deleted=False).\ filter_by(host=host).\ filter_by(topic=topic).\ all() if not result: raise exception.ComputeHostNotFound(host=host) return result @require_admin_context def _service_get_all_topic_subquery(context, session, topic, subq, label): sort_value = getattr(subq.c, label) return session.query(models.Service, func.coalesce(sort_value, 0)).\ filter_by(topic=topic).\ filter_by(deleted=False).\ filter_by(disabled=False).\ outerjoin((subq, models.Service.host == subq.c.host)).\ order_by(sort_value).\ all() @require_admin_context def service_get_all_compute_sorted(context): session = get_session() with session.begin(): # NOTE(vish): The intended query is below # SELECT services.*, COALESCE(inst_cores.instance_cores, # 0) # FROM services LEFT OUTER JOIN # (SELECT host, SUM(instances.vcpus) AS instance_cores # FROM instances GROUP BY host) AS inst_cores # ON services.host = inst_cores.host topic = 'compute' label = 'instance_cores' subq = session.query(models.Instance.host, func.sum(models.Instance.vcpus).label(label)).\ filter_by(deleted=False).\ group_by(models.Instance.host).\ subquery() return 
_service_get_all_topic_subquery(context, session, topic, subq, label) @require_admin_context def service_get_all_network_sorted(context): session = get_session() with session.begin(): topic = 'network' label = 'network_count' subq = session.query(models.Network.host, func.count(models.Network.id).label(label)).\ filter_by(deleted=False).\ group_by(models.Network.host).\ subquery() return _service_get_all_topic_subquery(context, session, topic, subq, label) @require_admin_context def service_get_all_volume_sorted(context): session = get_session() with session.begin(): topic = 'volume' label = 'volume_gigabytes' subq = session.query(models.Volume.host, func.sum(models.Volume.size).label(label)).\ filter_by(deleted=False).\ group_by(models.Volume.host).\ subquery() return _service_get_all_topic_subquery(context, session, topic, subq, label) @require_admin_context def service_get_by_args(context, host, binary): session = get_session() result = session.query(models.Service).\ filter_by(host=host).\ filter_by(binary=binary).\ filter_by(deleted=can_read_deleted(context)).\ first() if not result: raise exception.HostBinaryNotFound(host=host, binary=binary) return result @require_admin_context def service_create(context, values): service_ref = models.Service() service_ref.update(values) if not FLAGS.enable_new_services: service_ref.disabled = True service_ref.save() return service_ref @require_admin_context def service_update(context, service_id, values): session = get_session() with session.begin(): service_ref = service_get(context, service_id, session=session) service_ref.update(values) service_ref.save(session=session) ################### @require_admin_context def compute_node_get(context, compute_id, session=None): if not session: session = get_session() result = session.query(models.ComputeNode).\ filter_by(id=compute_id).\ filter_by(deleted=can_read_deleted(context)).\ first() if not result: raise exception.ComputeHostNotFound(host=compute_id) return result @require_admin_context def compute_node_create(context, values): compute_node_ref = models.ComputeNode() compute_node_ref.update(values) compute_node_ref.save() return compute_node_ref @require_admin_context def compute_node_update(context, compute_id, values): session = get_session() with session.begin(): compute_ref = compute_node_get(context, compute_id, session=session) compute_ref.update(values) compute_ref.save(session=session) ################### @require_admin_context def certificate_get(context, certificate_id, session=None): if not session: session = get_session() result = session.query(models.Certificate).\ filter_by(id=certificate_id).\ filter_by(deleted=can_read_deleted(context)).\ first() if not result: raise exception.CertificateNotFound(certificate_id=certificate_id) return result @require_admin_context def certificate_create(context, values): certificate_ref = models.Certificate() for (key, value) in values.iteritems(): certificate_ref[key] = value certificate_ref.save() return certificate_ref @require_admin_context def certificate_destroy(context, certificate_id): session = get_session() with session.begin(): certificate_ref = certificate_get(context, certificate_id, session=session) certificate_ref.delete(session=session) @require_admin_context def certificate_get_all_by_project(context, project_id): session = get_session() return session.query(models.Certificate).\ filter_by(project_id=project_id).\ filter_by(deleted=False).\ all() @require_admin_context def certificate_get_all_by_user(context, user_id): session = 
get_session() return session.query(models.Certificate).\ filter_by(user_id=user_id).\ filter_by(deleted=False).\ all() @require_admin_context def certificate_get_all_by_user_and_project(_context, user_id, project_id): session = get_session() return session.query(models.Certificate).\ filter_by(user_id=user_id).\ filter_by(project_id=project_id).\ filter_by(deleted=False).\ all() @require_admin_context def certificate_update(context, certificate_id, values): session = get_session() with session.begin(): certificate_ref = certificate_get(context, certificate_id, session=session) for (key, value) in values.iteritems(): certificate_ref[key] = value certificate_ref.save(session=session) ################### @require_context def floating_ip_get(context, id): session = get_session() result = None if is_admin_context(context): result = session.query(models.FloatingIp).\ options(joinedload('fixed_ip')).\ options(joinedload_all('fixed_ip.instance')).\ filter_by(id=id).\ filter_by(deleted=can_read_deleted(context)).\ first() elif is_user_context(context): result = session.query(models.FloatingIp).\ options(joinedload('fixed_ip')).\ options(joinedload_all('fixed_ip.instance')).\ filter_by(project_id=context.project_id).\ filter_by(id=id).\ filter_by(deleted=False).\ first() if not result: raise exception.FloatingIpNotFound(id=id) return result @require_context def floating_ip_allocate_address(context, project_id): authorize_project_context(context, project_id) session = get_session() with session.begin(): floating_ip_ref = session.query(models.FloatingIp).\ filter_by(fixed_ip_id=None).\ filter_by(project_id=None).\ filter_by(deleted=False).\ with_lockmode('update').\ first() # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues if not floating_ip_ref: raise exception.NoMoreFloatingIps() floating_ip_ref['project_id'] = project_id session.add(floating_ip_ref) return floating_ip_ref['address'] @require_context def floating_ip_create(context, values): floating_ip_ref = models.FloatingIp() floating_ip_ref.update(values) floating_ip_ref.save() return floating_ip_ref['address'] @require_context def floating_ip_count_by_project(context, project_id): authorize_project_context(context, project_id) session = get_session() # TODO(tr3buchet): why leave auto_assigned floating IPs out? 
return session.query(models.FloatingIp).\ filter_by(project_id=project_id).\ filter_by(auto_assigned=False).\ filter_by(deleted=False).\ count() @require_context def floating_ip_fixed_ip_associate(context, floating_address, fixed_address, host): session = get_session() with session.begin(): floating_ip_ref = floating_ip_get_by_address(context, floating_address, session=session) fixed_ip_ref = fixed_ip_get_by_address(context, fixed_address, session=session) floating_ip_ref.fixed_ip = fixed_ip_ref floating_ip_ref.host = host floating_ip_ref.save(session=session) @require_context def floating_ip_deallocate(context, address): session = get_session() with session.begin(): floating_ip_ref = floating_ip_get_by_address(context, address, session=session) floating_ip_ref['project_id'] = None floating_ip_ref['host'] = None floating_ip_ref['auto_assigned'] = False floating_ip_ref.save(session=session) @require_context def floating_ip_destroy(context, address): session = get_session() with session.begin(): floating_ip_ref = floating_ip_get_by_address(context, address, session=session) floating_ip_ref.delete(session=session) @require_context def floating_ip_disassociate(context, address): session = get_session() with session.begin(): floating_ip_ref = floating_ip_get_by_address(context, address, session=session) fixed_ip_ref = floating_ip_ref.fixed_ip if fixed_ip_ref: fixed_ip_address = fixed_ip_ref['address'] else: fixed_ip_address = None floating_ip_ref.fixed_ip = None floating_ip_ref.host = None floating_ip_ref.save(session=session) return fixed_ip_address @require_context def floating_ip_set_auto_assigned(context, address): session = get_session() with session.begin(): floating_ip_ref = floating_ip_get_by_address(context, address, session=session) floating_ip_ref.auto_assigned = True floating_ip_ref.save(session=session) @require_admin_context def floating_ip_get_all(context): session = get_session() floating_ip_refs = session.query(models.FloatingIp).\ options(joinedload_all('fixed_ip.instance')).\ filter_by(deleted=False).\ all() if not floating_ip_refs: raise exception.NoFloatingIpsDefined() return floating_ip_refs @require_admin_context def floating_ip_get_all_by_host(context, host): session = get_session() floating_ip_refs = session.query(models.FloatingIp).\ options(joinedload_all('fixed_ip.instance')).\ filter_by(host=host).\ filter_by(deleted=False).\ all() if not floating_ip_refs: raise exception.FloatingIpNotFoundForHost(host=host) return floating_ip_refs @require_context def floating_ip_get_all_by_project(context, project_id): authorize_project_context(context, project_id) session = get_session() # TODO(tr3buchet): why do we not want auto_assigned floating IPs here? return session.query(models.FloatingIp).\ options(joinedload_all('fixed_ip.instance')).\ filter_by(project_id=project_id).\ filter_by(auto_assigned=False).\ filter_by(deleted=False).\ all() @require_context def floating_ip_get_by_address(context, address, session=None): if not session: session = get_session() result = session.query(models.FloatingIp).\ options(joinedload_all('fixed_ip.network')).\ filter_by(address=address).\ filter_by(deleted=can_read_deleted(context)).\ first() if not result: raise exception.FloatingIpNotFoundForAddress(address=address) # If the floating IP has a project ID set, check to make sure # the non-admin user has access. 
if result.project_id and is_user_context(context): authorize_project_context(context, result.project_id) return result @require_context def floating_ip_update(context, address, values): session = get_session() with session.begin(): floating_ip_ref = floating_ip_get_by_address(context, address, session) for (key, value) in values.iteritems(): floating_ip_ref[key] = value floating_ip_ref.save(session=session) ################### @require_admin_context def fixed_ip_associate(context, address, instance_id, network_id=None, reserved=False): """Keyword arguments: reserved -- should be a boolean value(True or False), exact value will be used to filter on the fixed ip address """ session = get_session() with session.begin(): network_or_none = or_(models.FixedIp.network_id == network_id, models.FixedIp.network_id == None) fixed_ip_ref = session.query(models.FixedIp).\ filter(network_or_none).\ filter_by(reserved=reserved).\ filter_by(deleted=False).\ filter_by(address=address).\ with_lockmode('update').\ first() # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues if fixed_ip_ref is None: raise exception.FixedIpNotFoundForNetwork(address=address, network_id=network_id) if fixed_ip_ref.instance is not None: raise exception.FixedIpAlreadyInUse(address=address) if not fixed_ip_ref.network: fixed_ip_ref.network = network_get(context, network_id, session=session) fixed_ip_ref.instance = instance_get(context, instance_id, session=session) session.add(fixed_ip_ref) return fixed_ip_ref['address'] @require_admin_context def fixed_ip_associate_pool(context, network_id, instance_id=None, host=None): session = get_session() with session.begin(): network_or_none = or_(models.FixedIp.network_id == network_id, models.FixedIp.network_id == None) fixed_ip_ref = session.query(models.FixedIp).\ filter(network_or_none).\ filter_by(reserved=False).\ filter_by(deleted=False).\ filter_by(instance=None).\ filter_by(host=None).\ with_lockmode('update').\ first() # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues if not fixed_ip_ref: raise exception.NoMoreFixedIps() if not fixed_ip_ref.network: fixed_ip_ref.network = network_get(context, network_id, session=session) if instance_id: fixed_ip_ref.instance = instance_get(context, instance_id, session=session) if host: fixed_ip_ref.host = host session.add(fixed_ip_ref) return fixed_ip_ref['address'] @require_context def fixed_ip_create(_context, values): fixed_ip_ref = models.FixedIp() fixed_ip_ref.update(values) fixed_ip_ref.save() return fixed_ip_ref['address'] @require_context def fixed_ip_bulk_create(_context, ips): session = get_session() with session.begin(): for ip in ips: model = models.FixedIp() model.update(ip) session.add(model) @require_context def fixed_ip_disassociate(context, address): session = get_session() with session.begin(): fixed_ip_ref = fixed_ip_get_by_address(context, address, session=session) fixed_ip_ref.instance = None fixed_ip_ref.save(session=session) @require_admin_context def fixed_ip_disassociate_all_by_timeout(_context, host, time): session = get_session() inner_q = session.query(models.Network.id).\ filter_by(host=host).\ subquery() result = session.query(models.FixedIp).\ filter(models.FixedIp.network_id.in_(inner_q)).\ filter(models.FixedIp.updated_at < time).\ filter(models.FixedIp.instance_id != None).\ filter_by(allocated=False).\ update({'instance_id': None, 'leased': False, 'updated_at': utils.utcnow()}, synchronize_session='fetch') return result 
@require_admin_context
def fixed_ip_get_all(context, session=None):
    if not session:
        session = get_session()
    result = session.query(models.FixedIp).\
                     options(joinedload('floating_ips')).\
                     all()
    if not result:
        raise exception.NoFixedIpsDefined()

    return result


@require_admin_context
def fixed_ip_get_all_by_instance_host(context, host=None):
    session = get_session()

    result = session.query(models.FixedIp).\
                     options(joinedload('floating_ips')).\
                     join(models.FixedIp.instance).\
                     filter_by(state=1).\
                     filter_by(host=host).\
                     all()

    if not result:
        raise exception.FixedIpNotFoundForHost(host=host)

    return result


@require_context
def fixed_ip_get_by_address(context, address, session=None):
    if not session:
        session = get_session()
    result = session.query(models.FixedIp).\
                     filter_by(address=address).\
                     filter_by(deleted=can_read_deleted(context)).\
                     options(joinedload('floating_ips')).\
                     options(joinedload('network')).\
                     options(joinedload('instance')).\
                     first()
    if not result:
        raise exception.FixedIpNotFoundForAddress(address=address)

    if is_user_context(context):
        authorize_project_context(context, result.instance.project_id)

    return result


@require_context
def fixed_ip_get_by_instance(context, instance_id):
    session = get_session()
    rv = session.query(models.FixedIp).\
                 options(joinedload('floating_ips')).\
                 filter_by(instance_id=instance_id).\
                 filter_by(deleted=False).\
                 all()
    if not rv:
        raise exception.FixedIpNotFoundForInstance(instance_id=instance_id)
    return rv


@require_context
def fixed_ip_get_by_network_host(context, network_id, host):
    session = get_session()
    rv = session.query(models.FixedIp).\
                 filter_by(network_id=network_id).\
                 filter_by(host=host).\
                 filter_by(deleted=False).\
                 first()
    if not rv:
        raise exception.FixedIpNotFoundForNetworkHost(network_id=network_id,
                                                      host=host)
    return rv


@require_context
def fixed_ip_get_by_virtual_interface(context, vif_id):
    session = get_session()
    rv = session.query(models.FixedIp).\
                 options(joinedload('floating_ips')).\
                 filter_by(virtual_interface_id=vif_id).\
                 filter_by(deleted=False).\
                 all()
    if not rv:
        raise exception.FixedIpNotFoundForVirtualInterface(vif_id=vif_id)
    return rv


@require_admin_context
def fixed_ip_get_network(context, address):
    fixed_ip_ref = fixed_ip_get_by_address(context, address)
    return fixed_ip_ref.network


@require_context
def fixed_ip_update(context, address, values):
    session = get_session()
    with session.begin():
        fixed_ip_ref = fixed_ip_get_by_address(context,
                                               address,
                                               session=session)
        fixed_ip_ref.update(values)
        fixed_ip_ref.save(session=session)


###################


@require_context
def virtual_interface_create(context, values):
    """Create a new virtual interface record in the database.

    :param values: = dict containing column values
    """
    try:
        vif_ref = models.VirtualInterface()
        vif_ref.update(values)
        vif_ref.save()
    except IntegrityError:
        raise exception.VirtualInterfaceCreateException()

    return vif_ref


@require_context
def virtual_interface_update(context, vif_id, values):
    """Update a virtual interface record in the database.

    :param vif_id: = id of virtual interface to update
    :param values: = values to update
    """
    session = get_session()
    with session.begin():
        vif_ref = virtual_interface_get(context, vif_id, session=session)
        vif_ref.update(values)
        vif_ref.save(session=session)
        return vif_ref


@require_context
def virtual_interface_get(context, vif_id, session=None):
    """Gets a virtual interface from the table.


@require_context
def virtual_interface_get(context, vif_id, session=None):
    """Gets a virtual interface from the table.

    :param vif_id: = id of the virtual interface
    """
    if not session:
        session = get_session()

    vif_ref = session.query(models.VirtualInterface).\
                      filter_by(id=vif_id).\
                      options(joinedload('network')).\
                      options(joinedload('instance')).\
                      options(joinedload('fixed_ips')).\
                      first()
    return vif_ref


@require_context
def virtual_interface_get_by_address(context, address):
    """Gets a virtual interface from the table.

    :param address: = the address of the interface you're looking to get
    """
    session = get_session()
    vif_ref = session.query(models.VirtualInterface).\
                      filter_by(address=address).\
                      options(joinedload('network')).\
                      options(joinedload('instance')).\
                      options(joinedload('fixed_ips')).\
                      first()
    return vif_ref


@require_context
def virtual_interface_get_by_uuid(context, vif_uuid):
    """Gets a virtual interface from the table.

    :param vif_uuid: the uuid of the interface you're looking to get
    """
    session = get_session()
    vif_ref = session.query(models.VirtualInterface).\
                      filter_by(uuid=vif_uuid).\
                      options(joinedload('network')).\
                      options(joinedload('instance')).\
                      options(joinedload('fixed_ips')).\
                      first()
    return vif_ref


@require_context
def virtual_interface_get_by_fixed_ip(context, fixed_ip_id):
    """Gets the virtual interface the fixed_ip is associated with.

    :param fixed_ip_id: = id of the fixed_ip
    """
    session = get_session()
    vif_ref = session.query(models.VirtualInterface).\
                      filter_by(fixed_ip_id=fixed_ip_id).\
                      options(joinedload('network')).\
                      options(joinedload('instance')).\
                      options(joinedload('fixed_ips')).\
                      first()
    return vif_ref


@require_context
@require_instance_exists
def virtual_interface_get_by_instance(context, instance_id):
    """Gets all virtual interfaces for instance.

    :param instance_id: = id of the instance to retrieve vifs for
    """
    session = get_session()
    vif_refs = session.query(models.VirtualInterface).\
                       filter_by(instance_id=instance_id).\
                       options(joinedload('network')).\
                       options(joinedload('instance')).\
                       options(joinedload('fixed_ips')).\
                       all()
    return vif_refs


@require_context
def virtual_interface_get_by_instance_and_network(context, instance_id,
                                                  network_id):
    """Gets virtual interface for instance that's associated with
    network."""
    session = get_session()
    vif_ref = session.query(models.VirtualInterface).\
                      filter_by(instance_id=instance_id).\
                      filter_by(network_id=network_id).\
                      options(joinedload('network')).\
                      options(joinedload('instance')).\
                      options(joinedload('fixed_ips')).\
                      first()
    return vif_ref


@require_admin_context
def virtual_interface_get_by_network(context, network_id):
    """Gets all virtual interfaces on network.

    :param network_id: = network to retrieve vifs for
    """
    session = get_session()
    vif_refs = session.query(models.VirtualInterface).\
                       filter_by(network_id=network_id).\
                       options(joinedload('network')).\
                       options(joinedload('instance')).\
                       options(joinedload('fixed_ips')).\
                       all()
    return vif_refs


@require_context
def virtual_interface_delete(context, vif_id):
    """Delete virtual interface record from the database.

    :param vif_id: = id of vif to delete
    """
    session = get_session()
    vif_ref = virtual_interface_get(context, vif_id, session)
    with session.begin():
        session.delete(vif_ref)


@require_context
def virtual_interface_delete_by_instance(context, instance_id):
    """Delete virtual interface records associated with the instance
    given by instance_id.

    :param instance_id: = id of instance
    """
    vif_refs = virtual_interface_get_by_instance(context, instance_id)
    for vif_ref in vif_refs:
        virtual_interface_delete(context, vif_ref['id'])


###################


def _metadata_refs(metadata_dict, meta_class):
    metadata_refs = []
    if metadata_dict:
        for k, v in metadata_dict.iteritems():
            metadata_ref = meta_class()
            metadata_ref['key'] = k
            metadata_ref['value'] = v
            metadata_refs.append(metadata_ref)
    return metadata_refs


@require_context
def instance_create(context, values):
    """Create a new Instance record in the database.

    context - request context object
    values - dict containing column values.
    """
    values['metadata'] = _metadata_refs(values.get('metadata'),
                                        models.InstanceMetadata)

    instance_ref = models.Instance()
    instance_ref['uuid'] = str(utils.gen_uuid())

    instance_ref.update(values)

    session = get_session()
    with session.begin():
        instance_ref.save(session=session)
    return instance_ref


@require_admin_context
def instance_data_get_for_project(context, project_id):
    session = get_session()
    result = session.query(func.count(models.Instance.id),
                           func.sum(models.Instance.vcpus),
                           func.sum(models.Instance.memory_mb)).\
                     filter_by(project_id=project_id).\
                     filter_by(deleted=False).\
                     first()
    # NOTE(vish): convert None to 0
    return (result[0] or 0, result[1] or 0, result[2] or 0)


@require_context
def instance_destroy(context, instance_id):
    session = get_session()
    with session.begin():
        session.query(models.Instance).\
                filter_by(id=instance_id).\
                update({'deleted': True,
                        'deleted_at': utils.utcnow(),
                        'updated_at': literal_column('updated_at')})
        session.query(models.SecurityGroupInstanceAssociation).\
                filter_by(instance_id=instance_id).\
                update({'deleted': True,
                        'deleted_at': utils.utcnow(),
                        'updated_at': literal_column('updated_at')})
        session.query(models.InstanceMetadata).\
                filter_by(instance_id=instance_id).\
                update({'deleted': True,
                        'deleted_at': utils.utcnow(),
                        'updated_at': literal_column('updated_at')})


@require_context
def instance_stop(context, instance_id):
    session = get_session()
    with session.begin():
        session.query(models.Instance).\
                filter_by(id=instance_id).\
                update({'host': None,
                        'vm_state': vm_states.STOPPED,
                        'task_state': None,
                        'updated_at': literal_column('updated_at')})
        session.query(models.SecurityGroupInstanceAssociation).\
                filter_by(instance_id=instance_id).\
                update({'updated_at': literal_column('updated_at')})
        session.query(models.InstanceMetadata).\
                filter_by(instance_id=instance_id).\
                update({'updated_at': literal_column('updated_at')})


@require_context
def instance_get_by_uuid(context, uuid, session=None):
    partial = _build_instance_get(context, session=session)
    result = partial.filter_by(uuid=uuid)
    result = result.first()
    if not result:
        # FIXME(sirp): it would be nice if InstanceNotFound would accept a
        # uuid parameter as well
        raise exception.InstanceNotFound(instance_id=uuid)
    return result


@require_context
def instance_get(context, instance_id, session=None):
    partial = _build_instance_get(context, session=session)
    result = partial.filter_by(id=instance_id)
    result = result.first()
    if not result:
        raise exception.InstanceNotFound(instance_id=instance_id)
    return result
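
# Illustrative only: both getters above raise InstanceNotFound instead
# of returning None, so callers typically guard with the exception
# (``ctxt`` stands for any RequestContext):
#
#     try:
#         instance_ref = instance_get(ctxt, instance_id)
#     except exception.InstanceNotFound:
#         pass  # handle the missing row
#
# instance_get_by_uuid() behaves the same way but filters on the uuid
# column.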


@require_context
def _build_instance_get(context, session=None):
    if not session:
        session = get_session()
    partial = session.query(models.Instance).\
                      options(joinedload_all('fixed_ips.floating_ips')).\
                      options(joinedload_all('fixed_ips.network')).\
                      options(joinedload('virtual_interfaces')).\
                      options(joinedload_all('security_groups.rules')).\
                      options(joinedload('volumes')).\
                      options(joinedload('metadata')).\
                      options(joinedload('instance_type'))
    if is_admin_context(context):
        partial = partial.filter_by(deleted=can_read_deleted(context))
    elif is_user_context(context):
        partial = partial.filter_by(project_id=context.project_id).\
                          filter_by(deleted=False)
    return partial


@require_admin_context
def instance_get_all(context):
    session = get_session()
    return session.query(models.Instance).\
                   options(joinedload_all('fixed_ips.floating_ips')).\
                   options(joinedload_all('virtual_interfaces.network')).\
                   options(joinedload_all(
                           'virtual_interfaces.fixed_ips.floating_ips')).\
                   options(joinedload('virtual_interfaces.instance')).\
                   options(joinedload('security_groups')).\
                   options(joinedload_all('fixed_ips.network')).\
                   options(joinedload('metadata')).\
                   options(joinedload('instance_type')).\
                   filter_by(deleted=can_read_deleted(context)).\
                   all()


@require_context
def instance_get_all_by_filters(context, filters):
    """Return instances that match all filters.  Deleted instances
    will be returned by default, unless there's a filter that says
    otherwise.
    """

    def _regexp_filter_by_ipv6(instance, filter_re):
        for interface in instance['virtual_interfaces']:
            fixed_ipv6 = interface.get('fixed_ipv6')
            if fixed_ipv6 and filter_re.match(fixed_ipv6):
                return True
        return False

    def _regexp_filter_by_ip(instance, filter_re):
        for interface in instance['virtual_interfaces']:
            for fixed_ip in interface['fixed_ips']:
                if not fixed_ip or not fixed_ip['address']:
                    continue
                if filter_re.match(fixed_ip['address']):
                    return True
                for floating_ip in fixed_ip.get('floating_ips', []):
                    if not floating_ip or not floating_ip['address']:
                        continue
                    if filter_re.match(floating_ip['address']):
                        return True
        return False

    def _regexp_filter_by_metadata(instance, meta):
        inst_metadata = [{node['key']: node['value']}
                         for node in instance['metadata']]
        if isinstance(meta, list):
            for node in meta:
                if node not in inst_metadata:
                    return False
        elif isinstance(meta, dict):
            for k, v in meta.iteritems():
                if {k: v} not in inst_metadata:
                    return False
        return True

    def _regexp_filter_by_column(instance, filter_name, filter_re):
        try:
            v = getattr(instance, filter_name)
        except AttributeError:
            return True
        if v and filter_re.match(str(v)):
            return True
        return False

    def _exact_match_filter(query, column, value):
        """Do exact match against a column.  value to match can be a list
        so you can match any value in the list.
        """
        if isinstance(value, list):
            column_attr = getattr(models.Instance, column)
            return query.filter(column_attr.in_(value))
        else:
            filter_dict = {}
            filter_dict[column] = value
            return query.filter_by(**filter_dict)

    session = get_session()
    query_prefix = session.query(models.Instance).\
            options(joinedload_all('fixed_ips.floating_ips')).\
            options(joinedload_all('virtual_interfaces.network')).\
            options(joinedload_all(
                    'virtual_interfaces.fixed_ips.floating_ips')).\
            options(joinedload('virtual_interfaces.instance')).\
            options(joinedload('security_groups')).\
            options(joinedload_all('fixed_ips.network')).\
            options(joinedload('metadata')).\
            options(joinedload('instance_type')).\
            order_by(desc(models.Instance.created_at))

    # Make a copy of the filters dictionary to use going forward, as we'll
    # be modifying it and we shouldn't affect the caller's use of it.
    filters = filters.copy()

    if 'changes-since' in filters:
        changes_since = filters['changes-since']
        query_prefix = query_prefix.\
                filter(models.Instance.updated_at > changes_since)

    if not context.is_admin:
        # If we're not admin context, add appropriate filter..
        if context.project_id:
            filters['project_id'] = context.project_id
        else:
            filters['user_id'] = context.user_id

    # Filters for exact matches that we can do along with the SQL query...
    # For other filters that don't match this, we will do regexp matching
    exact_match_filter_names = ['project_id', 'user_id', 'image_ref',
                                'vm_state', 'instance_type_id', 'deleted']

    query_filters = [key for key in filters.iterkeys()
                     if key in exact_match_filter_names]

    for filter_name in query_filters:
        # Do the matching and remove the filter from the dictionary
        # so we don't try it again below..
        query_prefix = _exact_match_filter(query_prefix, filter_name,
                                           filters.pop(filter_name))

    instances = query_prefix.all()

    if not instances:
        return []

    # Now filter on everything else for regexp matching..
    # For filters not in the list, we'll attempt to use the filter_name
    # as a column name in Instance..
    regexp_filter_funcs = {'ip6': _regexp_filter_by_ipv6,
                           'ip': _regexp_filter_by_ip}

    for filter_name in filters.iterkeys():
        filter_func = regexp_filter_funcs.get(filter_name, None)
        filter_re = re.compile(str(filters[filter_name]))
        if filter_func:
            filter_l = lambda instance: filter_func(instance, filter_re)
        elif filter_name == 'metadata':
            filter_l = lambda instance: _regexp_filter_by_metadata(
                    instance, filters[filter_name])
        else:
            filter_l = lambda instance: _regexp_filter_by_column(
                    instance, filter_name, filter_re)
        instances = filter(filter_l, instances)

    return instances
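
# Illustrative filters dict (the values are made up): exact-match keys
# are applied in SQL, anything else falls through to the regexp helpers
# defined inside the function, e.g.
#
#     instance_get_all_by_filters(ctxt, {'project_id': 'someproject',
#                                        'ip': r'^10\.0\.'})
#
# returns instances of 'someproject' whose fixed or floating address
# starts with 10.0.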


@require_context
def instance_get_active_by_window(context, begin, end=None, project_id=None):
    """Return instances that were continuously active over window."""
    session = get_session()
    query = session.query(models.Instance).\
                    filter(models.Instance.launched_at < begin)
    if end:
        query = query.filter(or_(models.Instance.terminated_at == None,
                                 models.Instance.terminated_at > end))
    else:
        query = query.filter(models.Instance.terminated_at == None)
    if project_id:
        query = query.filter_by(project_id=project_id)
    return query.all()


@require_admin_context
def instance_get_active_by_window_joined(context, begin, end=None,
                                         project_id=None):
    """Return instances and joins that were continuously active over
    window."""
    session = get_session()
    query = session.query(models.Instance).\
                    options(joinedload_all('fixed_ips.floating_ips')).\
                    options(joinedload('security_groups')).\
                    options(joinedload_all('fixed_ips.network')).\
                    options(joinedload('instance_type')).\
                    filter(models.Instance.launched_at < begin)
    if end:
        query = query.filter(or_(models.Instance.terminated_at == None,
                                 models.Instance.terminated_at > end))
    else:
        query = query.filter(models.Instance.terminated_at == None)
    if project_id:
        query = query.filter_by(project_id=project_id)
    return query.all()


@require_admin_context
def instance_get_all_by_user(context, user_id):
    session = get_session()
    return session.query(models.Instance).\
                   options(joinedload_all('fixed_ips.floating_ips')).\
                   options(joinedload('virtual_interfaces')).\
                   options(joinedload('security_groups')).\
                   options(joinedload_all('fixed_ips.network')).\
                   options(joinedload('metadata')).\
                   options(joinedload('instance_type')).\
                   filter_by(deleted=can_read_deleted(context)).\
                   filter_by(user_id=user_id).\
                   all()


@require_admin_context
def instance_get_all_by_host(context, host):
    session = get_session()
    return session.query(models.Instance).\
                   options(joinedload_all('fixed_ips.floating_ips')).\
                   options(joinedload('virtual_interfaces')).\
                   options(joinedload('security_groups')).\
                   options(joinedload_all('fixed_ips.network')).\
                   options(joinedload('metadata')).\
                   options(joinedload('instance_type')).\
                   filter_by(host=host).\
                   filter_by(deleted=can_read_deleted(context)).\
                   all()


@require_context
def instance_get_all_by_project(context, project_id):
    authorize_project_context(context, project_id)
    session = get_session()
    return session.query(models.Instance).\
                   options(joinedload_all('fixed_ips.floating_ips')).\
                   options(joinedload('virtual_interfaces')).\
                   options(joinedload('security_groups')).\
                   options(joinedload_all('fixed_ips.network')).\
                   options(joinedload('metadata')).\
                   options(joinedload('instance_type')).\
                   filter_by(project_id=project_id).\
                   filter_by(deleted=can_read_deleted(context)).\
                   all()


@require_context
def instance_get_all_by_reservation(context, reservation_id):
    session = get_session()
    query = session.query(models.Instance).\
                    filter_by(reservation_id=reservation_id).\
                    options(joinedload_all('fixed_ips.floating_ips')).\
                    options(joinedload('virtual_interfaces')).\
                    options(joinedload('security_groups')).\
                    options(joinedload_all('fixed_ips.network')).\
                    options(joinedload('metadata')).\
                    options(joinedload('instance_type'))

    if is_admin_context(context):
        return query.\
                filter_by(deleted=can_read_deleted(context)).\
                all()
    elif is_user_context(context):
        return query.\
                filter_by(project_id=context.project_id).\
                filter_by(deleted=False).\
                all()


@require_context
def instance_get_by_fixed_ip(context, address):
    """Return instance ref by exact match of FixedIP"""
    fixed_ip_ref = fixed_ip_get_by_address(context, address)
    return fixed_ip_ref.instance


@require_context
def instance_get_by_fixed_ipv6(context, address):
    """Return instance ref by exact match of IPv6 address"""
    session = get_session()

    # convert IPv6 address to mac
    mac = ipv6.to_mac(address)

    # get virtual interface
    vif_ref = virtual_interface_get_by_address(context, mac)

    # look up instance based on instance_id from vif row
    result = session.query(models.Instance).\
                     filter_by(id=vif_ref['instance_id']).\
                     first()
    return result


@require_admin_context
def instance_get_project_vpn(context, project_id):
    session = get_session()
    return session.query(models.Instance).\
                   options(joinedload_all('fixed_ips.floating_ips')).\
                   options(joinedload('virtual_interfaces')).\
                   options(joinedload('security_groups')).\
                   options(joinedload_all('fixed_ips.network')).\
                   options(joinedload('metadata')).\
                   options(joinedload('instance_type')).\
                   filter_by(project_id=project_id).\
                   filter_by(image_ref=str(FLAGS.vpn_image_id)).\
                   filter_by(deleted=can_read_deleted(context)).\
                   first()


@require_context
def instance_get_fixed_addresses(context, instance_id):
    session = get_session()
    with session.begin():
        instance_ref = instance_get(context, instance_id, session=session)
        try:
            fixed_ips = fixed_ip_get_by_instance(context, instance_id)
        except exception.NotFound:
            return []
        return [fixed_ip.address for fixed_ip in fixed_ips]


@require_context
def instance_get_fixed_addresses_v6(context, instance_id):
    session = get_session()
    with session.begin():
        # get instance
        instance_ref = instance_get(context, instance_id, session=session)
        # assume instance has 1 mac for each network associated with it
        # get networks associated with instance
        network_refs = network_get_all_by_instance(context, instance_id)
        # compile a list of cidr_v6 prefixes sorted by network id
        prefixes = [ref.cidr_v6 for ref in
                    sorted(network_refs, key=lambda ref: ref.id)]
        # get vifs associated with instance
        vif_refs = virtual_interface_get_by_instance(context,
                                                     instance_ref.id)
        # compile list of the mac_addresses for vifs sorted by network id
        macs = [vif_ref['address'] for vif_ref in
                sorted(vif_refs,
                       key=lambda vif_ref: vif_ref['network_id'])]
        # get project id from instance
        project_id = instance_ref.project_id
        # combine prefixes, macs, and project_id into
        # (prefix, mac, project_id) tuples
        prefix_mac_tuples = zip(prefixes, macs,
                                [project_id for m in macs])
        # return list containing ipv6 address for each tuple
        return [ipv6.to_global(*t) for t in prefix_mac_tuples]
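
# Worked example of the zipping above (the addresses are made up): with
# prefixes == ['fd00::/64'], macs == ['02:16:3e:00:00:01'] and a
# project_id of 'p1', the function returns
# [ipv6.to_global('fd00::/64', '02:16:3e:00:00:01', 'p1')], i.e. one
# global v6 address per (network, vif) pair.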


@require_context
def instance_get_floating_address(context, instance_id):
    fixed_ip_refs = fixed_ip_get_by_instance(context, instance_id)
    if not fixed_ip_refs:
        return None
    # NOTE(tr3buchet): this only gets the first fixed_ip;
    #                  won't find floating ips associated with other
    #                  fixed_ips
    if not fixed_ip_refs[0].floating_ips:
        return None
    # NOTE(vish): this just returns the first floating ip
    return fixed_ip_refs[0].floating_ips[0]['address']


@require_context
def instance_update(context, instance_id, values):
    session = get_session()
    metadata = values.get('metadata')
    if metadata is not None:
        instance_metadata_update(context, instance_id,
                                 values.pop('metadata'), delete=True)
    with session.begin():
        if utils.is_uuid_like(instance_id):
            instance_ref = instance_get_by_uuid(context, instance_id,
                                                session=session)
        else:
            instance_ref = instance_get(context, instance_id,
                                        session=session)
        instance_ref.update(values)
        instance_ref.save(session=session)
    return instance_ref


def instance_add_security_group(context, instance_id, security_group_id):
    """Associate the given security group with the given instance"""
    session = get_session()
    with session.begin():
        instance_ref = instance_get(context, instance_id, session=session)
        security_group_ref = security_group_get(context,
                                                security_group_id,
                                                session=session)
        instance_ref.security_groups += [security_group_ref]
        instance_ref.save(session=session)


@require_context
def instance_remove_security_group(context, instance_id, security_group_id):
    """Disassociate the given security group from the given instance"""
    session = get_session()
    session.query(models.SecurityGroupInstanceAssociation).\
            filter_by(instance_id=instance_id).\
            filter_by(security_group_id=security_group_id).\
            update({'deleted': True,
                    'deleted_at': utils.utcnow(),
                    'updated_at': literal_column('updated_at')})


@require_context
def instance_action_create(context, values):
    """Create an instance action from the values dictionary."""
    action_ref = models.InstanceActions()
    action_ref.update(values)

    session = get_session()
    with session.begin():
        action_ref.save(session=session)
    return action_ref


@require_admin_context
def instance_get_actions(context, instance_id):
    """Return the actions associated to the given instance id"""
    session = get_session()

    if utils.is_uuid_like(instance_id):
        instance = instance_get_by_uuid(context, instance_id, session)
        instance_id = instance.id

    return session.query(models.InstanceActions).\
                   filter_by(instance_id=instance_id).\
                   all()


###################


@require_context
def key_pair_create(context, values):
    key_pair_ref = models.KeyPair()
    key_pair_ref.update(values)
    key_pair_ref.save()
    return key_pair_ref


@require_context
def key_pair_destroy(context, user_id, name):
    authorize_user_context(context, user_id)
    session = get_session()
    with session.begin():
        key_pair_ref = key_pair_get(context, user_id, name, session=session)
        key_pair_ref.delete(session=session)


@require_context
def key_pair_destroy_all_by_user(context, user_id):
    authorize_user_context(context, user_id)
    session = get_session()
    with session.begin():
        session.query(models.KeyPair).\
                filter_by(user_id=user_id).\
                update({'deleted': True,
                        'deleted_at': utils.utcnow(),
                        'updated_at': literal_column('updated_at')})


@require_context
def key_pair_get(context, user_id, name, session=None):
    authorize_user_context(context, user_id)
    if not session:
        session = get_session()
    result = session.query(models.KeyPair).\
                     filter_by(user_id=user_id).\
                     filter_by(name=name).\
                     filter_by(deleted=can_read_deleted(context)).\
                     first()
    if not result:
        raise exception.KeypairNotFound(user_id=user_id, name=name)
    return result


@require_context
def key_pair_get_all_by_user(context, user_id):
    authorize_user_context(context, user_id)
    session = get_session()
    return session.query(models.KeyPair).\
                   filter_by(user_id=user_id).\
                   filter_by(deleted=False).\
                   all()


###################


@require_admin_context
def network_associate(context, project_id, force=False):
    """Associate a project with a network.

    Called by project_get_networks under certain conditions
    and by the network manager's add_network_to_project().

    Only associate if the project doesn't already have a network,
    or if force is True.

    force solves a race condition where a fresh project has multiple
    instance builds simultaneously picked up by multiple network hosts,
    which then attempt to associate the project with multiple networks.
    force should only be used as a direct consequence of a user request;
    automated requests should not use force.
    """
    session = get_session()
    with session.begin():

        def network_query(project_filter):
            return session.query(models.Network).\
                           filter_by(deleted=False).\
                           filter_by(project_id=project_filter).\
                           with_lockmode('update').\
                           first()

        if not force:
            # find out if project has a network
            network_ref = network_query(project_id)

        if force or not network_ref:
            # in force mode, or the project doesn't have a network, so
            # associate it with a new network

            # get new network
            network_ref = network_query(None)
            if not network_ref:
                raise db.NoMoreNetworks()

            # associate with network
            # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
            #             then this has concurrency issues
            network_ref['project_id'] = project_id
            session.add(network_ref)
    return network_ref


@require_admin_context
def network_count(context):
    session = get_session()
    return session.query(models.Network).\
                   filter_by(deleted=can_read_deleted(context)).\
                   count()


@require_admin_context
def network_count_allocated_ips(context, network_id):
    session = get_session()
    return session.query(models.FixedIp).\
                   filter_by(network_id=network_id).\
                   filter_by(allocated=True).\
                   filter_by(deleted=False).\
                   count()


@require_admin_context
def network_count_available_ips(context, network_id):
    session = get_session()
    return session.query(models.FixedIp).\
                   filter_by(network_id=network_id).\
                   filter_by(allocated=False).\
                   filter_by(reserved=False).\
                   filter_by(deleted=False).\
                   count()


@require_admin_context
def network_count_reserved_ips(context, network_id):
    session = get_session()
    return session.query(models.FixedIp).\
                   filter_by(network_id=network_id).\
                   filter_by(reserved=True).\
                   filter_by(deleted=False).\
                   count()


@require_admin_context
def network_create_safe(context, values):
    network_ref = models.Network()
    network_ref['uuid'] = str(utils.gen_uuid())
    network_ref.update(values)

    try:
        network_ref.save()
        return network_ref
    except IntegrityError:
        return None


@require_admin_context
def network_delete_safe(context, network_id):
    session = get_session()
    with session.begin():
        network_ref = network_get(context, network_id=network_id,
                                  session=session)
        session.delete(network_ref)


@require_admin_context
def network_disassociate(context, network_id):
    network_update(context, network_id, {'project_id': None,
                                         'host': None})


@require_admin_context
def network_disassociate_all(context):
    session = get_session()
    session.query(models.Network).\
            update({'project_id': None,
                    'updated_at': literal_column('updated_at')})


@require_context
def network_get(context, network_id, session=None):
    if not session:
        session = get_session()
    result = None

    if is_admin_context(context):
        result = session.query(models.Network).\
                         filter_by(id=network_id).\
                         filter_by(deleted=can_read_deleted(context)).\
                         first()
    elif is_user_context(context):
        result = session.query(models.Network).\
                         filter_by(project_id=context.project_id).\
                         filter_by(id=network_id).\
                         filter_by(deleted=False).\
                         first()
    if not result:
        raise exception.NetworkNotFound(network_id=network_id)
    return result


@require_admin_context
def network_get_all(context):
    session = get_session()
    result = session.query(models.Network).\
                     filter_by(deleted=False).\
                     all()
    if not result:
        raise exception.NoNetworksFound()
    return result


@require_admin_context
def network_get_all_by_uuids(context, network_uuids, project_id=None):
    session = get_session()
    project_or_none = or_(models.Network.project_id == project_id,
                          models.Network.project_id == None)
    result = session.query(models.Network).\
                     filter(models.Network.uuid.in_(network_uuids)).\
                     filter(project_or_none).\
                     filter_by(deleted=False).\
                     all()
    if not result:
        raise exception.NoNetworksFound()

    # check if host is set on all of the networks
    # returned in the result
    for network in result:
        if network['host'] is None:
            raise exception.NetworkHostNotSet(network_id=network['id'])

    # check if the result contains all the networks
    # we are looking for
    for network_uuid in network_uuids:
        found = False
        for network in result:
            if network['uuid'] == network_uuid:
                found = True
                break
        if not found:
            if project_id:
                raise exception.NetworkNotFoundForProject(
                        network_uuid=network_uuid,
                        project_id=context.project_id)
            raise exception.NetworkNotFound(network_id=network_uuid)

    return result


# NOTE(vish): pylint complains because of the long method name, but
#             it fits with the names of the rest of the methods
# pylint: disable=C0103


@require_admin_context
def network_get_associated_fixed_ips(context, network_id):
    session = get_session()
    return session.query(models.FixedIp).\
                   options(joinedload_all('instance')).\
                   filter_by(network_id=network_id).\
                   filter(models.FixedIp.instance_id != None).\
                   filter(models.FixedIp.virtual_interface_id != None).\
                   filter_by(deleted=False).\
                   all()


@require_admin_context
def network_get_by_bridge(context, bridge):
    session = get_session()
    result = session.query(models.Network).\
                     filter_by(bridge=bridge).\
                     filter_by(deleted=False).\
                     first()
    if not result:
        raise exception.NetworkNotFoundForBridge(bridge=bridge)
    return result


@require_admin_context
def network_get_by_uuid(context, uuid):
    session = get_session()
    result = session.query(models.Network).\
                     filter_by(uuid=uuid).\
                     filter_by(deleted=False).\
                     first()
    if not result:
        raise exception.NetworkNotFoundForUUID(uuid=uuid)
    return result


@require_admin_context
def network_get_by_cidr(context, cidr):
    session = get_session()
    result = session.query(models.Network).\
                     filter(or_(models.Network.cidr == cidr,
                                models.Network.cidr_v6 == cidr)).\
                     filter_by(deleted=False).\
                     first()
    if not result:
        raise exception.NetworkNotFoundForCidr(cidr=cidr)
    return result


@require_admin_context
def network_get_by_instance(_context, instance_id):
    # note this uses fixed IP to get to instance
    # only works for networks the instance has an IP from
    session = get_session()
    rv = session.query(models.Network).\
                 filter_by(deleted=False).\
                 join(models.Network.fixed_ips).\
                 filter_by(instance_id=instance_id).\
                 filter_by(deleted=False).\
                 first()
    if not rv:
        raise exception.NetworkNotFoundForInstance(instance_id=instance_id)
    return rv


@require_admin_context
def network_get_all_by_instance(_context, instance_id):
    session = get_session()
    rv = session.query(models.Network).\
                 filter_by(deleted=False).\
                 join(models.Network.fixed_ips).\
                 filter_by(instance_id=instance_id).\
                 filter_by(deleted=False).\
                 all()
    if not rv:
        raise exception.NetworkNotFoundForInstance(instance_id=instance_id)
    return rv


@require_admin_context
def network_get_all_by_host(context, host):
    session = get_session()
    with session.begin():
        # NOTE(vish): return networks that have host set
        #             or that have a fixed ip with host set
        host_filter = or_(models.Network.host == host,
                          models.FixedIp.host == host)
        return session.query(models.Network).\
                       filter_by(deleted=False).\
                       join(models.Network.fixed_ips).\
                       filter(host_filter).\
                       filter_by(deleted=False).\
                       all()


@require_admin_context
def network_set_host(context, network_id, host_id):
    session = get_session()
    with session.begin():
        network_ref = session.query(models.Network).\
                              filter_by(id=network_id).\
                              filter_by(deleted=False).\
                              with_lockmode('update').\
                              first()
        if not network_ref:
            raise exception.NetworkNotFound(network_id=network_id)

        # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
        #             then this has concurrency issues
        if not network_ref['host']:
            network_ref['host'] = host_id
            session.add(network_ref)
    return network_ref['host']


@require_context
def network_update(context, network_id, values):
    session = get_session()
    with session.begin():
        network_ref = network_get(context, network_id, session=session)
        network_ref.update(values)
        network_ref.save(session=session)
    return network_ref


###################


def queue_get_for(_context, topic, physical_node_id):
    # FIXME(ja): this should be servername?
    return "%s.%s" % (topic, physical_node_id)
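
# For example (hypothetical values): queue_get_for(ctxt, 'compute',
# 'host1') yields the routing key 'compute.host1'.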


###################


@require_admin_context
def export_device_count(context):
    session = get_session()
    return session.query(models.ExportDevice).\
                   filter_by(deleted=can_read_deleted(context)).\
                   count()


@require_admin_context
def export_device_create_safe(context, values):
    export_device_ref = models.ExportDevice()
    export_device_ref.update(values)
    try:
        export_device_ref.save()
        return export_device_ref
    except IntegrityError:
        return None


###################


@require_admin_context
def iscsi_target_count_by_host(context, host):
    session = get_session()
    return session.query(models.IscsiTarget).\
                   filter_by(deleted=can_read_deleted(context)).\
                   filter_by(host=host).\
                   count()


@require_admin_context
def iscsi_target_create_safe(context, values):
    iscsi_target_ref = models.IscsiTarget()
    for (key, value) in values.iteritems():
        iscsi_target_ref[key] = value
    try:
        iscsi_target_ref.save()
        return iscsi_target_ref
    except IntegrityError:
        return None


###################


@require_admin_context
def auth_token_destroy(context, token_id):
    session = get_session()
    with session.begin():
        token_ref = auth_token_get(context, token_id, session=session)
        token_ref.delete(session=session)


@require_admin_context
def auth_token_get(context, token_hash, session=None):
    if session is None:
        session = get_session()
    tk = session.query(models.AuthToken).\
                 filter_by(token_hash=token_hash).\
                 filter_by(deleted=can_read_deleted(context)).\
                 first()
    if not tk:
        raise exception.AuthTokenNotFound(token=token_hash)
    return tk


@require_admin_context
def auth_token_update(context, token_hash, values):
    session = get_session()
    with session.begin():
        token_ref = auth_token_get(context, token_hash, session=session)
        token_ref.update(values)
        token_ref.save(session=session)


@require_admin_context
def auth_token_create(_context, token):
    tk = models.AuthToken()
    tk.update(token)
    tk.save()
    return tk


###################


@require_context
def quota_get(context, project_id, resource, session=None):
    if not session:
        session = get_session()
    result = session.query(models.Quota).\
                     filter_by(project_id=project_id).\
                     filter_by(resource=resource).\
                     filter_by(deleted=False).\
                     first()
    if not result:
        raise exception.ProjectQuotaNotFound(project_id=project_id)
    return result


@require_context
def quota_get_all_by_project(context, project_id):
    authorize_project_context(context, project_id)
    session = get_session()
    result = {'project_id': project_id}
    rows = session.query(models.Quota).\
                   filter_by(project_id=project_id).\
                   filter_by(deleted=False).\
                   all()
    for row in rows:
        result[row.resource] = row.hard_limit
    return result


@require_admin_context
def quota_create(context, project_id, resource, limit):
    quota_ref = models.Quota()
    quota_ref.project_id = project_id
    quota_ref.resource = resource
    quota_ref.hard_limit = limit
    quota_ref.save()
    return quota_ref


@require_admin_context
def quota_update(context, project_id, resource, limit):
    session = get_session()
    with session.begin():
        quota_ref = quota_get(context, project_id, resource, session=session)
        quota_ref.hard_limit = limit
        quota_ref.save(session=session)


@require_admin_context
def quota_destroy(context, project_id, resource):
    session = get_session()
    with session.begin():
        quota_ref = quota_get(context, project_id, resource, session=session)
        quota_ref.delete(session=session)


@require_admin_context
def quota_destroy_all_by_project(context, project_id):
    session = get_session()
    with session.begin():
        quotas = session.query(models.Quota).\
                         filter_by(project_id=project_id).\
                         filter_by(deleted=False).\
                         all()
        for quota_ref in quotas:
            quota_ref.delete(session=session)


###################


@require_admin_context
def volume_allocate_shelf_and_blade(context, volume_id):
    session = get_session()
    with session.begin():
        export_device = session.query(models.ExportDevice).\
                                filter_by(volume=None).\
                                filter_by(deleted=False).\
                                with_lockmode('update').\
                                first()
        # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
        #             then this has concurrency issues
        if not export_device:
            raise db.NoMoreBlades()
        export_device.volume_id = volume_id
        session.add(export_device)
    return (export_device.shelf_id, export_device.blade_id)


@require_admin_context
def volume_allocate_iscsi_target(context, volume_id, host):
    session = get_session()
    with session.begin():
        iscsi_target_ref = session.query(models.IscsiTarget).\
                                   filter_by(volume=None).\
                                   filter_by(host=host).\
                                   filter_by(deleted=False).\
                                   with_lockmode('update').\
                                   first()
        # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
        #             then this has concurrency issues
        if not iscsi_target_ref:
            raise db.NoMoreTargets()
        iscsi_target_ref.volume_id = volume_id
        session.add(iscsi_target_ref)
    return iscsi_target_ref.target_num


@require_admin_context
def volume_attached(context, volume_id, instance_id, mountpoint):
    session = get_session()
    with session.begin():
        volume_ref = volume_get(context, volume_id, session=session)
        volume_ref['status'] = 'in-use'
        volume_ref['mountpoint'] = mountpoint
        volume_ref['attach_status'] = 'attached'
        volume_ref.instance = instance_get(context, instance_id,
                                           session=session)
        volume_ref.save(session=session)


@require_context
def volume_create(context, values):
    values['volume_metadata'] = _metadata_refs(values.get('metadata'),
                                               models.VolumeMetadata)
    volume_ref = models.Volume()
    volume_ref.update(values)

    session = get_session()
    with session.begin():
        volume_ref.save(session=session)
    return volume_ref


@require_admin_context
def volume_data_get_for_project(context, project_id):
    session = get_session()
    result = session.query(func.count(models.Volume.id),
                           func.sum(models.Volume.size)).\
                     filter_by(project_id=project_id).\
                     filter_by(deleted=False).\
                     first()
    # NOTE(vish): convert None to 0
    return (result[0] or 0, result[1] or 0)


@require_admin_context
def volume_destroy(context, volume_id):
    session = get_session()
    with session.begin():
        session.query(models.Volume).\
                filter_by(id=volume_id).\
                update({'deleted': True,
                        'deleted_at': utils.utcnow(),
                        'updated_at': literal_column('updated_at')})
        session.query(models.ExportDevice).\
                filter_by(volume_id=volume_id).\
                update({'volume_id': None})
        session.query(models.IscsiTarget).\
                filter_by(volume_id=volume_id).\
                update({'volume_id': None})
        session.query(models.VolumeMetadata).\
                filter_by(volume_id=volume_id).\
                update({'deleted': True,
                        'deleted_at': utils.utcnow(),
                        'updated_at': literal_column('updated_at')})


@require_admin_context
def volume_detached(context, volume_id):
    session = get_session()
    with session.begin():
        volume_ref = volume_get(context, volume_id, session=session)
        volume_ref['status'] = 'available'
        volume_ref['mountpoint'] = None
        volume_ref['attach_status'] = 'detached'
        volume_ref.instance = None
        volume_ref.save(session=session)
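
# Illustrative attach/detach round trip (the ids and mountpoint are
# assumptions): the status transitions above give
#
#     volume_attached(ctxt, volume_id, instance_id, '/dev/vdb')
#     # -> status 'in-use', attach_status 'attached'
#     volume_detached(ctxt, volume_id)
#     # -> status 'available', attach_status 'detached', mountpoint None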


@require_context
def volume_get(context, volume_id, session=None):
    if not session:
        session = get_session()
    result = None

    if is_admin_context(context):
        result = session.query(models.Volume).\
                         options(joinedload('instance')).\
                         options(joinedload('volume_metadata')).\
                         options(joinedload('volume_type')).\
                         filter_by(id=volume_id).\
                         filter_by(deleted=can_read_deleted(context)).\
                         first()
    elif is_user_context(context):
        result = session.query(models.Volume).\
                         options(joinedload('instance')).\
                         options(joinedload('volume_metadata')).\
                         options(joinedload('volume_type')).\
                         filter_by(project_id=context.project_id).\
                         filter_by(id=volume_id).\
                         filter_by(deleted=False).\
                         first()
    if not result:
        raise exception.VolumeNotFound(volume_id=volume_id)
    return result


@require_admin_context
def volume_get_all(context):
    session = get_session()
    return session.query(models.Volume).\
                   options(joinedload('instance')).\
                   options(joinedload('volume_metadata')).\
                   options(joinedload('volume_type')).\
                   filter_by(deleted=can_read_deleted(context)).\
                   all()


@require_admin_context
def volume_get_all_by_host(context, host):
    session = get_session()
    return session.query(models.Volume).\
                   options(joinedload('instance')).\
                   options(joinedload('volume_metadata')).\
                   options(joinedload('volume_type')).\
                   filter_by(host=host).\
                   filter_by(deleted=can_read_deleted(context)).\
                   all()


@require_admin_context
def volume_get_all_by_instance(context, instance_id):
    session = get_session()
    result = session.query(models.Volume).\
                     options(joinedload('volume_metadata')).\
                     options(joinedload('volume_type')).\
                     filter_by(instance_id=instance_id).\
                     filter_by(deleted=False).\
                     all()
    if not result:
        raise exception.VolumeNotFoundForInstance(instance_id=instance_id)
    return result


@require_context
def volume_get_all_by_project(context, project_id):
    authorize_project_context(context, project_id)
    session = get_session()
    return session.query(models.Volume).\
                   options(joinedload('instance')).\
                   options(joinedload('volume_metadata')).\
                   options(joinedload('volume_type')).\
                   filter_by(project_id=project_id).\
                   filter_by(deleted=can_read_deleted(context)).\
                   all()


@require_admin_context
def volume_get_instance(context, volume_id):
    session = get_session()
    result = session.query(models.Volume).\
                     filter_by(id=volume_id).\
                     filter_by(deleted=can_read_deleted(context)).\
                     options(joinedload('instance')).\
                     options(joinedload('volume_metadata')).\
                     options(joinedload('volume_type')).\
                     first()
    if not result:
        raise exception.VolumeNotFound(volume_id=volume_id)
    return result.instance


@require_admin_context
def volume_get_shelf_and_blade(context, volume_id):
    session = get_session()
    result = session.query(models.ExportDevice).\
                     filter_by(volume_id=volume_id).\
                     first()
    if not result:
        raise exception.ExportDeviceNotFoundForVolume(volume_id=volume_id)
    return (result.shelf_id, result.blade_id)


@require_admin_context
def volume_get_iscsi_target_num(context, volume_id):
    session = get_session()
    result = session.query(models.IscsiTarget).\
                     filter_by(volume_id=volume_id).\
                     first()
    if not result:
        raise exception.ISCSITargetNotFoundForVolume(volume_id=volume_id)
    return result.target_num


@require_context
def volume_update(context, volume_id, values):
    session = get_session()
    metadata = values.get('metadata')
    if metadata is not None:
        volume_metadata_update(context, volume_id,
                               values.pop('metadata'), delete=True)
    with session.begin():
        volume_ref = volume_get(context, volume_id, session=session)
        volume_ref.update(values)
        volume_ref.save(session=session)


####################


@require_context
@require_volume_exists
def volume_metadata_get(context, volume_id):
    session = get_session()
    meta_results = session.query(models.VolumeMetadata).\
                           filter_by(volume_id=volume_id).\
                           filter_by(deleted=False).\
                           all()

    meta_dict = {}
    for i in meta_results:
        meta_dict[i['key']] = i['value']
    return meta_dict


@require_context
@require_volume_exists
def volume_metadata_delete(context, volume_id, key):
    session = get_session()
    session.query(models.VolumeMetadata).\
            filter_by(volume_id=volume_id).\
            filter_by(key=key).\
            filter_by(deleted=False).\
            update({'deleted': True,
                    'deleted_at': utils.utcnow(),
                    'updated_at': literal_column('updated_at')})


@require_context
@require_volume_exists
def volume_metadata_delete_all(context, volume_id):
    session = get_session()
    session.query(models.VolumeMetadata).\
            filter_by(volume_id=volume_id).\
            filter_by(deleted=False).\
            update({'deleted': True,
                    'deleted_at': utils.utcnow(),
                    'updated_at': literal_column('updated_at')})


@require_context
@require_volume_exists
def volume_metadata_get_item(context, volume_id, key, session=None):
    if not session:
        session = get_session()

    meta_result = session.query(models.VolumeMetadata).\
                          filter_by(volume_id=volume_id).\
                          filter_by(key=key).\
                          filter_by(deleted=False).\
                          first()

    if not meta_result:
        raise exception.VolumeMetadataNotFound(metadata_key=key,
                                               volume_id=volume_id)
    return meta_result


@require_context
@require_volume_exists
def volume_metadata_update(context, volume_id, metadata, delete):
    session = get_session()

    # Set existing metadata to deleted if delete argument is True
    if delete:
        original_metadata = volume_metadata_get(context, volume_id)
        for meta_key, meta_value in original_metadata.iteritems():
            if meta_key not in metadata:
                meta_ref = volume_metadata_get_item(context, volume_id,
                                                    meta_key, session)
                meta_ref.update({'deleted': True})
                meta_ref.save(session=session)

    meta_ref = None

    # Now update all existing items with new values, or create new meta
    # objects
    for meta_key, meta_value in metadata.iteritems():

        # update the value whether it exists or not
        item = {"value": meta_value}

        try:
            meta_ref = volume_metadata_get_item(context, volume_id,
                                                meta_key, session)
        except exception.VolumeMetadataNotFound:
            meta_ref = models.VolumeMetadata()
            item.update({"key": meta_key, "volume_id": volume_id})

        meta_ref.update(item)
        meta_ref.save(session=session)

    return metadata
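
# Sketch of the delete=True semantics above (keys and values are made
# up): if a volume currently carries {'a': '1', 'b': '2'}, then
#
#     volume_metadata_update(ctxt, volume_id, {'b': '3', 'c': '4'}, True)
#
# soft-deletes 'a', updates 'b' and creates 'c', leaving {'b': '3',
# 'c': '4'} as the effective metadata.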


###################


@require_context
def snapshot_create(context, values):
    snapshot_ref = models.Snapshot()
    snapshot_ref.update(values)

    session = get_session()
    with session.begin():
        snapshot_ref.save(session=session)
    return snapshot_ref


@require_admin_context
def snapshot_destroy(context, snapshot_id):
    session = get_session()
    with session.begin():
        session.query(models.Snapshot).\
                filter_by(id=snapshot_id).\
                update({'deleted': True,
                        'deleted_at': utils.utcnow(),
                        'updated_at': literal_column('updated_at')})


@require_context
def snapshot_get(context, snapshot_id, session=None):
    if not session:
        session = get_session()
    result = None

    if is_admin_context(context):
        result = session.query(models.Snapshot).\
                         filter_by(id=snapshot_id).\
                         filter_by(deleted=can_read_deleted(context)).\
                         first()
    elif is_user_context(context):
        result = session.query(models.Snapshot).\
                         filter_by(project_id=context.project_id).\
                         filter_by(id=snapshot_id).\
                         filter_by(deleted=False).\
                         first()
    if not result:
        raise exception.SnapshotNotFound(snapshot_id=snapshot_id)
    return result


@require_admin_context
def snapshot_get_all(context):
    session = get_session()
    return session.query(models.Snapshot).\
                   filter_by(deleted=can_read_deleted(context)).\
                   all()


@require_context
def snapshot_get_all_by_project(context, project_id):
    authorize_project_context(context, project_id)
    session = get_session()
    return session.query(models.Snapshot).\
                   filter_by(project_id=project_id).\
                   filter_by(deleted=can_read_deleted(context)).\
                   all()


@require_context
def snapshot_update(context, snapshot_id, values):
    session = get_session()
    with session.begin():
        snapshot_ref = snapshot_get(context, snapshot_id, session=session)
        snapshot_ref.update(values)
        snapshot_ref.save(session=session)


###################


@require_context
def block_device_mapping_create(context, values):
    bdm_ref = models.BlockDeviceMapping()
    bdm_ref.update(values)

    session = get_session()
    with session.begin():
        bdm_ref.save(session=session)


@require_context
def block_device_mapping_update(context, bdm_id, values):
    session = get_session()
    with session.begin():
        session.query(models.BlockDeviceMapping).\
                filter_by(id=bdm_id).\
                filter_by(deleted=False).\
                update(values)


@require_context
def block_device_mapping_update_or_create(context, values):
    session = get_session()
    with session.begin():
        result = session.query(models.BlockDeviceMapping).\
                         filter_by(instance_id=values['instance_id']).\
                         filter_by(device_name=values['device_name']).\
                         filter_by(deleted=False).\
                         first()
        if not result:
            bdm_ref = models.BlockDeviceMapping()
            bdm_ref.update(values)
            bdm_ref.save(session=session)
        else:
            result.update(values)

        # NOTE(yamahata): same virtual device name can be specified multiple
        #                 times. So delete the existing ones.
        virtual_name = values['virtual_name']
        if (virtual_name is not None and
            block_device.is_swap_or_ephemeral(virtual_name)):
            session.query(models.BlockDeviceMapping).\
                    filter_by(instance_id=values['instance_id']).\
                    filter_by(virtual_name=virtual_name).\
                    filter(models.BlockDeviceMapping.device_name !=
                           values['device_name']).\
                    update({'deleted': True,
                            'deleted_at': utils.utcnow(),
                            'updated_at': literal_column('updated_at')})


@require_context
def block_device_mapping_get_all_by_instance(context, instance_id):
    session = get_session()
    result = session.query(models.BlockDeviceMapping).\
                     filter_by(instance_id=instance_id).\
                     filter_by(deleted=False).\
                     all()
    if not result:
        return []
    return result


@require_context
def block_device_mapping_destroy(context, bdm_id):
    session = get_session()
    with session.begin():
        session.query(models.BlockDeviceMapping).\
                filter_by(id=bdm_id).\
                update({'deleted': True,
                        'deleted_at': utils.utcnow(),
                        'updated_at': literal_column('updated_at')})


@require_context
def block_device_mapping_destroy_by_instance_and_volume(context, instance_id,
                                                        volume_id):
    session = get_session()
    with session.begin():
        session.query(models.BlockDeviceMapping).\
                filter_by(instance_id=instance_id).\
                filter_by(volume_id=volume_id).\
                filter_by(deleted=False).\
                update({'deleted': True,
                        'deleted_at': utils.utcnow(),
                        'updated_at': literal_column('updated_at')})
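
# Note on block_device_mapping_update_or_create() above, with an assumed
# values dict: passing {'instance_id': 1, 'device_name': '/dev/vdb',
# 'virtual_name': 'swap'} upserts the '/dev/vdb' row and soft-deletes
# any other mapping on the same instance whose virtual_name is also
# 'swap', since swap/ephemeral names must stay unique per instance.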


###################


@require_context
def security_group_get_all(context):
    session = get_session()
    return session.query(models.SecurityGroup).\
                   filter_by(deleted=can_read_deleted(context)).\
                   options(joinedload_all('rules')).\
                   all()


@require_context
def security_group_get(context, security_group_id, session=None):
    if not session:
        session = get_session()
    if is_admin_context(context):
        result = session.query(models.SecurityGroup).\
                         filter_by(deleted=can_read_deleted(context)).\
                         filter_by(id=security_group_id).\
                         options(joinedload_all('rules')).\
                         options(joinedload_all('instances')).\
                         first()
    else:
        result = session.query(models.SecurityGroup).\
                         filter_by(deleted=False).\
                         filter_by(id=security_group_id).\
                         filter_by(project_id=context.project_id).\
                         options(joinedload_all('rules')).\
                         options(joinedload_all('instances')).\
                         first()
    if not result:
        raise exception.SecurityGroupNotFound(
                security_group_id=security_group_id)
    return result


@require_context
def security_group_get_by_name(context, project_id, group_name):
    session = get_session()
    result = session.query(models.SecurityGroup).\
                     filter_by(project_id=project_id).\
                     filter_by(name=group_name).\
                     filter_by(deleted=False).\
                     options(joinedload_all('rules')).\
                     options(joinedload_all('instances')).\
                     first()
    if not result:
        raise exception.SecurityGroupNotFoundForProject(
                project_id=project_id, security_group_id=group_name)
    return result


@require_context
def security_group_get_by_project(context, project_id):
    session = get_session()
    return session.query(models.SecurityGroup).\
                   filter_by(project_id=project_id).\
                   filter_by(deleted=False).\
                   options(joinedload_all('rules')).\
                   all()


@require_context
def security_group_get_by_instance(context, instance_id):
    session = get_session()
    return session.query(models.SecurityGroup).\
                   filter_by(deleted=False).\
                   options(joinedload_all('rules')).\
                   join(models.SecurityGroup.instances).\
                   filter_by(id=instance_id).\
                   filter_by(deleted=False).\
                   all()


@require_context
def security_group_exists(context, project_id, group_name):
    try:
        group = security_group_get_by_name(context, project_id, group_name)
        return group is not None
    except exception.NotFound:
        return False


@require_context
def security_group_create(context, values):
    security_group_ref = models.SecurityGroup()
    # FIXME(devcamcar): Unless I do this, rules fails with lazy load
    # exception once save() is called.  This will get cleaned up in
    # next orm pass.
    security_group_ref.rules
    security_group_ref.update(values)
    security_group_ref.save()
    return security_group_ref


@require_context
def security_group_destroy(context, security_group_id):
    session = get_session()
    with session.begin():
        session.query(models.SecurityGroup).\
                filter_by(id=security_group_id).\
                update({'deleted': True,
                        'deleted_at': utils.utcnow(),
                        'updated_at': literal_column('updated_at')})
        session.query(models.SecurityGroupInstanceAssociation).\
                filter_by(security_group_id=security_group_id).\
                update({'deleted': True,
                        'deleted_at': utils.utcnow(),
                        'updated_at': literal_column('updated_at')})
        session.query(models.SecurityGroupIngressRule).\
                filter_by(group_id=security_group_id).\
                update({'deleted': True,
                        'deleted_at': utils.utcnow(),
                        'updated_at': literal_column('updated_at')})


@require_context
def security_group_destroy_all(context, session=None):
    if not session:
        session = get_session()
    with session.begin():
        session.query(models.SecurityGroup).\
                update({'deleted': True,
                        'deleted_at': utils.utcnow(),
                        'updated_at': literal_column('updated_at')})
        session.query(models.SecurityGroupIngressRule).\
                update({'deleted': True,
                        'deleted_at': utils.utcnow(),
                        'updated_at': literal_column('updated_at')})


@require_context
def security_group_count_by_project(context, project_id):
    authorize_project_context(context, project_id)
    session = get_session()
    return session.query(models.SecurityGroup).\
                   filter_by(deleted=False).\
                   filter_by(project_id=project_id).\
                   count()


###################


@require_context
def security_group_rule_get(context, security_group_rule_id, session=None):
    if not session:
        session = get_session()
    if is_admin_context(context):
        result = session.query(models.SecurityGroupIngressRule).\
                         filter_by(deleted=can_read_deleted(context)).\
                         filter_by(id=security_group_rule_id).\
                         first()
    else:
        # TODO(vish): Join to group and check for project_id
        result = session.query(models.SecurityGroupIngressRule).\
                         filter_by(deleted=False).\
                         filter_by(id=security_group_rule_id).\
                         first()
    if not result:
        raise exception.SecurityGroupNotFoundForRule(
                rule_id=security_group_rule_id)
    return result


@require_context
def security_group_rule_get_by_security_group(context, security_group_id,
                                              session=None):
    if not session:
        session = get_session()
    if is_admin_context(context):
        result = session.query(models.SecurityGroupIngressRule).\
                         filter_by(deleted=can_read_deleted(context)).\
                         filter_by(parent_group_id=security_group_id).\
                         options(joinedload_all(
                                 'grantee_group.instances')).\
                         all()
    else:
        result = session.query(models.SecurityGroupIngressRule).\
                         filter_by(deleted=False).\
                         filter_by(parent_group_id=security_group_id).\
                         options(joinedload_all(
                                 'grantee_group.instances')).\
                         all()
    return result


@require_context
def security_group_rule_get_by_security_group_grantee(context,
                                                      security_group_id,
                                                      session=None):
    if not session:
        session = get_session()
    if is_admin_context(context):
        result = session.query(models.SecurityGroupIngressRule).\
                         filter_by(deleted=can_read_deleted(context)).\
                         filter_by(group_id=security_group_id).\
                         all()
    else:
        result = session.query(models.SecurityGroupIngressRule).\
                         filter_by(deleted=False).\
                         filter_by(group_id=security_group_id).\
                         all()
    return result


@require_context
def security_group_rule_create(context, values):
    security_group_rule_ref = models.SecurityGroupIngressRule()
    security_group_rule_ref.update(values)
    security_group_rule_ref.save()
    return security_group_rule_ref
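
# Illustrative rule values (a sketch; the column names are inferred from
# the queries in this section): an ingress rule granting TCP/22 from a
# CIDR to a parent group could be created with
#
#     security_group_rule_create(ctxt,
#                                {'parent_group_id': group_ref['id'],
#                                 'protocol': 'tcp',
#                                 'from_port': 22,
#                                 'to_port': 22,
#                                 'cidr': '0.0.0.0/0'})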


@require_context
def security_group_rule_destroy(context, security_group_rule_id):
    session = get_session()
    with session.begin():
        security_group_rule = security_group_rule_get(
                context, security_group_rule_id, session=session)
        security_group_rule.delete(session=session)


@require_context
def security_group_rule_count_by_group(context, security_group_id):
    session = get_session()
    return session.query(models.SecurityGroupIngressRule).\
                   filter_by(deleted=False).\
                   filter_by(parent_group_id=security_group_id).\
                   count()


###################


@require_admin_context
def provider_fw_rule_create(context, rule):
    fw_rule_ref = models.ProviderFirewallRule()
    fw_rule_ref.update(rule)
    fw_rule_ref.save()
    return fw_rule_ref


@require_admin_context
def provider_fw_rule_get_all(context):
    session = get_session()
    return session.query(models.ProviderFirewallRule).\
                   filter_by(deleted=can_read_deleted(context)).\
                   all()


@require_admin_context
def provider_fw_rule_get_all_by_cidr(context, cidr):
    session = get_session()
    return session.query(models.ProviderFirewallRule).\
                   filter_by(deleted=can_read_deleted(context)).\
                   filter_by(cidr=cidr).\
                   all()


@require_admin_context
def provider_fw_rule_destroy(context, rule_id):
    session = get_session()
    with session.begin():
        session.query(models.ProviderFirewallRule).\
                filter_by(id=rule_id).\
                update({'deleted': True,
                        'deleted_at': utils.utcnow(),
                        'updated_at': literal_column('updated_at')})


###################


@require_admin_context
def user_get(context, id, session=None):
    if not session:
        session = get_session()
    result = session.query(models.User).\
                     filter_by(id=id).\
                     filter_by(deleted=can_read_deleted(context)).\
                     first()
    if not result:
        raise exception.UserNotFound(user_id=id)
    return result


@require_admin_context
def user_get_by_access_key(context, access_key, session=None):
    if not session:
        session = get_session()
    result = session.query(models.User).\
                     filter_by(access_key=access_key).\
                     filter_by(deleted=can_read_deleted(context)).\
                     first()
    if not result:
        raise exception.AccessKeyNotFound(access_key=access_key)
    return result


@require_admin_context
def user_create(_context, values):
    user_ref = models.User()
    user_ref.update(values)
    user_ref.save()
    return user_ref


@require_admin_context
def user_delete(context, id):
    session = get_session()
    with session.begin():
        session.query(models.UserProjectAssociation).\
                filter_by(user_id=id).\
                delete()
        session.query(models.UserRoleAssociation).\
                filter_by(user_id=id).\
                delete()
        session.query(models.UserProjectRoleAssociation).\
                filter_by(user_id=id).\
                delete()
        user_ref = user_get(context, id, session=session)
        session.delete(user_ref)


def user_get_all(context):
    session = get_session()
    return session.query(models.User).\
                   filter_by(deleted=can_read_deleted(context)).\
                   all()


def user_get_roles(context, user_id):
    session = get_session()
    with session.begin():
        user_ref = user_get(context, user_id, session=session)
        return [role.role for role in user_ref['roles']]


def user_get_roles_for_project(context, user_id, project_id):
    session = get_session()
    with session.begin():
        res = session.query(models.UserProjectRoleAssociation).\
                      filter_by(user_id=user_id).\
                      filter_by(project_id=project_id).\
                      all()
        return [association.role for association in res]


def user_remove_project_role(context, user_id, project_id, role):
    session = get_session()
    with session.begin():
        session.query(models.UserProjectRoleAssociation).\
                filter_by(user_id=user_id).\
                filter_by(project_id=project_id).\
                filter_by(role=role).\
                delete()


def user_remove_role(context, user_id, role):
    session = get_session()
    with session.begin():
        res = session.query(models.UserRoleAssociation).\
                      filter_by(user_id=user_id).\
                      filter_by(role=role).\
                      all()
        # don't shadow the ``role`` argument with the association rows
        for role_ref in res:
            session.delete(role_ref)


def user_add_role(context, user_id, role):
    session = get_session()
    with session.begin():
        user_ref = user_get(context, user_id, session=session)
        models.UserRoleAssociation(user=user_ref, role=role).\
               save(session=session)


def user_add_project_role(context, user_id, project_id, role):
    session = get_session()
    with session.begin():
        user_ref = user_get(context, user_id, session=session)
        project_ref = project_get(context, project_id, session=session)
        models.UserProjectRoleAssociation(user_id=user_ref['id'],
                                          project_id=project_ref['id'],
                                          role=role).save(session=session)


def user_update(context, user_id, values):
    session = get_session()
    with session.begin():
        user_ref = user_get(context, user_id, session=session)
        user_ref.update(values)
        user_ref.save(session=session)
project_get(context, project_id, session=session) user = user_get(context, user_id, session=session) if user in project.members: project.members.remove(user) project.save(session=session) def project_update(context, project_id, values): session = get_session() with session.begin(): project_ref = project_get(context, project_id, session=session) project_ref.update(values) project_ref.save(session=session) def project_delete(context, id): session = get_session() with session.begin(): session.query(models.UserProjectAssociation).\ filter_by(project_id=id).\ delete() session.query(models.UserProjectRoleAssociation).\ filter_by(project_id=id).\ delete() project_ref = project_get(context, id, session=session) session.delete(project_ref) @require_context def project_get_networks(context, project_id, associate=True): # NOTE(tr3buchet): as before this function will associate # a project with a network if it doesn't have one and # associate is true session = get_session() result = session.query(models.Network).\ filter_by(project_id=project_id).\ filter_by(deleted=False).all() if not result: if not associate: return [] return [network_associate(context, project_id)] return result @require_context def project_get_networks_v6(context, project_id): return project_get_networks(context, project_id) ################### @require_admin_context def migration_create(context, values): migration = models.Migration() migration.update(values) migration.save() return migration @require_admin_context def migration_update(context, id, values): session = get_session() with session.begin(): migration = migration_get(context, id, session=session) migration.update(values) migration.save(session=session) return migration @require_admin_context def migration_get(context, id, session=None): if not session: session = get_session() result = session.query(models.Migration).\ filter_by(id=id).first() if not result: raise exception.MigrationNotFound(migration_id=id) return result @require_admin_context def migration_get_by_instance_and_status(context, instance_uuid, status): session = get_session() result = session.query(models.Migration).\ filter_by(instance_uuid=instance_uuid).\ filter_by(status=status).first() if not result: raise exception.MigrationNotFoundByStatus(instance_id=instance_uuid, status=status) return result ################## def console_pool_create(context, values): pool = models.ConsolePool() pool.update(values) pool.save() return pool def console_pool_get(context, pool_id): session = get_session() result = session.query(models.ConsolePool).\ filter_by(deleted=False).\ filter_by(id=pool_id).\ first() if not result: raise exception.ConsolePoolNotFound(pool_id=pool_id) return result def console_pool_get_by_host_type(context, compute_host, host, console_type): session = get_session() result = session.query(models.ConsolePool).\ filter_by(host=host).\ filter_by(console_type=console_type).\ filter_by(compute_host=compute_host).\ filter_by(deleted=False).\ options(joinedload('consoles')).\ first() if not result: raise exception.ConsolePoolNotFoundForHostType(host=host, console_type=console_type, compute_host=compute_host) return result def console_pool_get_all_by_host_type(context, host, console_type): session = get_session() return session.query(models.ConsolePool).\ filter_by(host=host).\ filter_by(console_type=console_type).\ filter_by(deleted=False).\ options(joinedload('consoles')).\ all() def console_create(context, values): console = models.Console() console.update(values) console.save() return console def 
console_delete(context, console_id): session = get_session() with session.begin(): # consoles are meant to be transient. (mdragon) session.query(models.Console).\ filter_by(id=console_id).\ delete() def console_get_by_pool_instance(context, pool_id, instance_id): session = get_session() result = session.query(models.Console).\ filter_by(pool_id=pool_id).\ filter_by(instance_id=instance_id).\ options(joinedload('pool')).\ first() if not result: raise exception.ConsoleNotFoundInPoolForInstance(pool_id=pool_id, instance_id=instance_id) return result def console_get_all_by_instance(context, instance_id): session = get_session() results = session.query(models.Console).\ filter_by(instance_id=instance_id).\ options(joinedload('pool')).\ all() return results def console_get(context, console_id, instance_id=None): session = get_session() query = session.query(models.Console).\ filter_by(id=console_id) if instance_id: query = query.filter_by(instance_id=instance_id) result = query.options(joinedload('pool')).first() if not result: if instance_id: raise exception.ConsoleNotFoundForInstance(console_id=console_id, instance_id=instance_id) else: raise exception.ConsoleNotFound(console_id=console_id) return result ################## @require_admin_context def instance_type_create(_context, values): """Create a new instance type. In order to pass in extra specs, the values dict should contain a 'extra_specs' key/value pair: {'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}} """ try: specs = values.get('extra_specs') specs_refs = [] if specs: for k, v in specs.iteritems(): specs_ref = models.InstanceTypeExtraSpecs() specs_ref['key'] = k specs_ref['value'] = v specs_refs.append(specs_ref) values['extra_specs'] = specs_refs instance_type_ref = models.InstanceTypes() instance_type_ref.update(values) instance_type_ref.save() except Exception, e: raise exception.DBError(e) return instance_type_ref def _dict_with_extra_specs(inst_type_query): """Takes an instance OR volume type query returned by sqlalchemy and returns it as a dictionary, converting the extra_specs entry from a list of dicts: 'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...] to a single dict: 'extra_specs' : {'k1': 'v1'} """ inst_type_dict = dict(inst_type_query) extra_specs = dict([(x['key'], x['value']) for x in \ inst_type_query['extra_specs']]) inst_type_dict['extra_specs'] = extra_specs return inst_type_dict @require_context def instance_type_get_all(context, inactive=False): """ Returns a dict describing all instance_types with name as key. 
""" session = get_session() if inactive: inst_types = session.query(models.InstanceTypes).\ options(joinedload('extra_specs')).\ order_by("name").\ all() else: inst_types = session.query(models.InstanceTypes).\ options(joinedload('extra_specs')).\ filter_by(deleted=False).\ order_by("name").\ all() inst_dict = {} if inst_types: for i in inst_types: inst_dict[i['name']] = _dict_with_extra_specs(i) return inst_dict @require_context def instance_type_get(context, id): """Returns a dict describing specific instance_type""" session = get_session() inst_type = session.query(models.InstanceTypes).\ options(joinedload('extra_specs')).\ filter_by(id=id).\ first() if not inst_type: raise exception.InstanceTypeNotFound(instance_type=id) else: return _dict_with_extra_specs(inst_type) @require_context def instance_type_get_by_name(context, name): """Returns a dict describing specific instance_type""" session = get_session() inst_type = session.query(models.InstanceTypes).\ options(joinedload('extra_specs')).\ filter_by(name=name).\ first() if not inst_type: raise exception.InstanceTypeNotFoundByName(instance_type_name=name) else: return _dict_with_extra_specs(inst_type) @require_context def instance_type_get_by_flavor_id(context, id): """Returns a dict describing specific flavor_id""" try: flavor_id = int(id) except ValueError: raise exception.FlavorNotFound(flavor_id=id) session = get_session() inst_type = session.query(models.InstanceTypes).\ options(joinedload('extra_specs')).\ filter_by(flavorid=flavor_id).\ first() if not inst_type: raise exception.FlavorNotFound(flavor_id=flavor_id) else: return _dict_with_extra_specs(inst_type) @require_admin_context def instance_type_destroy(context, name): """ Marks specific instance_type as deleted""" session = get_session() instance_type_ref = session.query(models.InstanceTypes).\ filter_by(name=name) records = instance_type_ref.update(dict(deleted=True)) if records == 0: raise exception.InstanceTypeNotFoundByName(instance_type_name=name) else: return instance_type_ref @require_admin_context def instance_type_purge(context, name): """ Removes specific instance_type from DB Usually instance_type_destroy should be used """ session = get_session() instance_type_ref = session.query(models.InstanceTypes).\ filter_by(name=name) records = instance_type_ref.delete() if records == 0: raise exception.InstanceTypeNotFoundByName(instance_type_name=name) else: return instance_type_ref #################### @require_admin_context def zone_create(context, values): zone = models.Zone() zone.update(values) zone.save() return zone @require_admin_context def zone_update(context, zone_id, values): session = get_session() zone = session.query(models.Zone).filter_by(id=zone_id).first() if not zone: raise exception.ZoneNotFound(zone_id=zone_id) zone.update(values) zone.save(session=session) return zone @require_admin_context def zone_delete(context, zone_id): session = get_session() with session.begin(): session.query(models.Zone).\ filter_by(id=zone_id).\ delete() @require_admin_context def zone_get(context, zone_id): session = get_session() result = session.query(models.Zone).filter_by(id=zone_id).first() if not result: raise exception.ZoneNotFound(zone_id=zone_id) return result @require_admin_context def zone_get_all(context): session = get_session() return session.query(models.Zone).all() #################### @require_context @require_instance_exists def instance_metadata_get(context, instance_id): session = get_session() meta_results = session.query(models.InstanceMetadata).\ 
filter_by(instance_id=instance_id).\ filter_by(deleted=False).\ all() meta_dict = {} for i in meta_results: meta_dict[i['key']] = i['value'] return meta_dict @require_context @require_instance_exists def instance_metadata_delete(context, instance_id, key): session = get_session() session.query(models.InstanceMetadata).\ filter_by(instance_id=instance_id).\ filter_by(key=key).\ filter_by(deleted=False).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context @require_instance_exists def instance_metadata_delete_all(context, instance_id): session = get_session() session.query(models.InstanceMetadata).\ filter_by(instance_id=instance_id).\ filter_by(deleted=False).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context @require_instance_exists def instance_metadata_get_item(context, instance_id, key, session=None): if not session: session = get_session() meta_result = session.query(models.InstanceMetadata).\ filter_by(instance_id=instance_id).\ filter_by(key=key).\ filter_by(deleted=False).\ first() if not meta_result: raise exception.InstanceMetadataNotFound(metadata_key=key, instance_id=instance_id) return meta_result @require_context @require_instance_exists def instance_metadata_update(context, instance_id, metadata, delete): session = get_session() # Set existing metadata to deleted if delete argument is True if delete: original_metadata = instance_metadata_get(context, instance_id) for meta_key, meta_value in original_metadata.iteritems(): if meta_key not in metadata: meta_ref = instance_metadata_get_item(context, instance_id, meta_key, session) meta_ref.update({'deleted': True}) meta_ref.save(session=session) meta_ref = None # Now update all existing items with new values, or create new meta objects for meta_key, meta_value in metadata.iteritems(): # update the value whether it exists or not item = {"value": meta_value} try: meta_ref = instance_metadata_get_item(context, instance_id, meta_key, session) except exception.InstanceMetadataNotFound, e: meta_ref = models.InstanceMetadata() item.update({"key": meta_key, "instance_id": instance_id}) meta_ref.update(item) meta_ref.save(session=session) return metadata #################### @require_admin_context def agent_build_create(context, values): agent_build_ref = models.AgentBuild() agent_build_ref.update(values) agent_build_ref.save() return agent_build_ref @require_admin_context def agent_build_get_by_triple(context, hypervisor, os, architecture, session=None): if not session: session = get_session() return session.query(models.AgentBuild).\ filter_by(hypervisor=hypervisor).\ filter_by(os=os).\ filter_by(architecture=architecture).\ filter_by(deleted=False).\ first() @require_admin_context def agent_build_get_all(context): session = get_session() return session.query(models.AgentBuild).\ filter_by(deleted=False).\ all() @require_admin_context def agent_build_destroy(context, agent_build_id): session = get_session() with session.begin(): session.query(models.AgentBuild).\ filter_by(id=agent_build_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_admin_context def agent_build_update(context, agent_build_id, values): session = get_session() with session.begin(): agent_build_ref = session.query(models.AgentBuild).\ filter_by(id=agent_build_id). 
\ first() agent_build_ref.update(values) agent_build_ref.save(session=session) #################### @require_context def instance_type_extra_specs_get(context, instance_type_id): session = get_session() spec_results = session.query(models.InstanceTypeExtraSpecs).\ filter_by(instance_type_id=instance_type_id).\ filter_by(deleted=False).\ all() spec_dict = {} for i in spec_results: spec_dict[i['key']] = i['value'] return spec_dict @require_context def instance_type_extra_specs_delete(context, instance_type_id, key): session = get_session() session.query(models.InstanceTypeExtraSpecs).\ filter_by(instance_type_id=instance_type_id).\ filter_by(key=key).\ filter_by(deleted=False).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def instance_type_extra_specs_get_item(context, instance_type_id, key, session=None): if not session: session = get_session() spec_result = session.query(models.InstanceTypeExtraSpecs).\ filter_by(instance_type_id=instance_type_id).\ filter_by(key=key).\ filter_by(deleted=False).\ first() if not spec_result: raise exception.\ InstanceTypeExtraSpecsNotFound(extra_specs_key=key, instance_type_id=instance_type_id) return spec_result @require_context def instance_type_extra_specs_update_or_create(context, instance_type_id, specs): session = get_session() spec_ref = None for key, value in specs.iteritems(): try: spec_ref = instance_type_extra_specs_get_item( context, instance_type_id, key, session) except exception.InstanceTypeExtraSpecsNotFound, e: spec_ref = models.InstanceTypeExtraSpecs() spec_ref.update({"key": key, "value": value, "instance_type_id": instance_type_id, "deleted": 0}) spec_ref.save(session=session) return specs ################## @require_admin_context def volume_type_create(_context, values): """Create a new volume type. In order to pass in extra specs, the values dict should contain an 'extra_specs' key/value pair: {'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}} """ try: values['extra_specs'] = _metadata_refs(values.get('extra_specs'), models.VolumeTypeExtraSpecs) volume_type_ref = models.VolumeTypes() volume_type_ref.update(values) volume_type_ref.save() except Exception, e: raise exception.DBError(e) return volume_type_ref @require_context def volume_type_get_all(context, inactive=False, filters={}): """ Returns a dict describing all volume_types with name as key.
""" session = get_session() if inactive: vol_types = session.query(models.VolumeTypes).\ options(joinedload('extra_specs')).\ order_by("name").\ all() else: vol_types = session.query(models.VolumeTypes).\ options(joinedload('extra_specs')).\ filter_by(deleted=False).\ order_by("name").\ all() vol_dict = {} if vol_types: for i in vol_types: vol_dict[i['name']] = _dict_with_extra_specs(i) return vol_dict @require_context def volume_type_get(context, id): """Returns a dict describing specific volume_type""" session = get_session() vol_type = session.query(models.VolumeTypes).\ options(joinedload('extra_specs')).\ filter_by(id=id).\ first() if not vol_type: raise exception.VolumeTypeNotFound(volume_type=id) else: return _dict_with_extra_specs(vol_type) @require_context def volume_type_get_by_name(context, name): """Returns a dict describing specific volume_type""" session = get_session() vol_type = session.query(models.VolumeTypes).\ options(joinedload('extra_specs')).\ filter_by(name=name).\ first() if not vol_type: raise exception.VolumeTypeNotFoundByName(volume_type_name=name) else: return _dict_with_extra_specs(vol_type) @require_admin_context def volume_type_destroy(context, name): """ Marks specific volume_type as deleted""" session = get_session() volume_type_ref = session.query(models.VolumeTypes).\ filter_by(name=name) records = volume_type_ref.update(dict(deleted=True)) if records == 0: raise exception.VolumeTypeNotFoundByName(volume_type_name=name) else: return volume_type_ref @require_admin_context def volume_type_purge(context, name): """ Removes specific volume_type from DB Usually volume_type_destroy should be used """ session = get_session() volume_type_ref = session.query(models.VolumeTypes).\ filter_by(name=name) records = volume_type_ref.delete() if records == 0: raise exception.VolumeTypeNotFoundByName(volume_type_name=name) else: return volume_type_ref #################### @require_context def volume_type_extra_specs_get(context, volume_type_id): session = get_session() spec_results = session.query(models.VolumeTypeExtraSpecs).\ filter_by(volume_type_id=volume_type_id).\ filter_by(deleted=False).\ all() spec_dict = {} for i in spec_results: spec_dict[i['key']] = i['value'] return spec_dict @require_context def volume_type_extra_specs_delete(context, volume_type_id, key): session = get_session() session.query(models.VolumeTypeExtraSpecs).\ filter_by(volume_type_id=volume_type_id).\ filter_by(key=key).\ filter_by(deleted=False).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def volume_type_extra_specs_get_item(context, volume_type_id, key, session=None): if not session: session = get_session() spec_result = session.query(models.VolumeTypeExtraSpecs).\ filter_by(volume_type_id=volume_type_id).\ filter_by(key=key).\ filter_by(deleted=False).\ first() if not spec_result: raise exception.\ VolumeTypeExtraSpecsNotFound(extra_specs_key=key, volume_type_id=volume_type_id) return spec_result @require_context def volume_type_extra_specs_update_or_create(context, volume_type_id, specs): session = get_session() spec_ref = None for key, value in specs.iteritems(): try: spec_ref = volume_type_extra_specs_get_item( context, volume_type_id, key, session) except exception.VolumeTypeExtraSpecsNotFound, e: spec_ref = models.VolumeTypeExtraSpecs() spec_ref.update({"key": key, "value": value, "volume_type_id": volume_type_id, "deleted": 0}) spec_ref.save(session=session) return specs #################### 
@require_admin_context def vsa_create(context, values): """ Creates Virtual Storage Array record. """ try: vsa_ref = models.VirtualStorageArray() vsa_ref.update(values) vsa_ref.save() except Exception, e: raise exception.DBError(e) return vsa_ref @require_admin_context def vsa_update(context, vsa_id, values): """ Updates Virtual Storage Array record. """ session = get_session() with session.begin(): vsa_ref = vsa_get(context, vsa_id, session=session) vsa_ref.update(values) vsa_ref.save(session=session) return vsa_ref @require_admin_context def vsa_destroy(context, vsa_id): """ Deletes Virtual Storage Array record. """ session = get_session() with session.begin(): session.query(models.VirtualStorageArray).\ filter_by(id=vsa_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def vsa_get(context, vsa_id, session=None): """ Get Virtual Storage Array record by ID. """ if not session: session = get_session() result = None if is_admin_context(context): result = session.query(models.VirtualStorageArray).\ options(joinedload('vsa_instance_type')).\ filter_by(id=vsa_id).\ filter_by(deleted=can_read_deleted(context)).\ first() elif is_user_context(context): result = session.query(models.VirtualStorageArray).\ options(joinedload('vsa_instance_type')).\ filter_by(project_id=context.project_id).\ filter_by(id=vsa_id).\ filter_by(deleted=False).\ first() if not result: raise exception.VirtualStorageArrayNotFound(id=vsa_id) return result @require_admin_context def vsa_get_all(context): """ Get all Virtual Storage Array records. """ session = get_session() return session.query(models.VirtualStorageArray).\ options(joinedload('vsa_instance_type')).\ filter_by(deleted=can_read_deleted(context)).\ all() @require_context def vsa_get_all_by_project(context, project_id): """ Get all Virtual Storage Array records by project ID. """ authorize_project_context(context, project_id) session = get_session() return session.query(models.VirtualStorageArray).\ options(joinedload('vsa_instance_type')).\ filter_by(project_id=project_id).\ filter_by(deleted=can_read_deleted(context)).\ all() ####################
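# Illustrative note (not part of the original API): most destroy operations
# in this module are soft deletes issued as a single bulk UPDATE. Assigning
# literal_column('updated_at') back to itself is deliberate: it keeps the
# update from bumping the row's updated_at timestamp, e.g.:
#
#     session.query(models.AgentBuild).\
#             filter_by(id=agent_build_id).\
#             update({'deleted': True,
#                     'deleted_at': utils.utcnow(),
#                     'updated_at': literal_column('updated_at')})
#
# Readers then filter on deleted=False, or on can_read_deleted(context)
# where admins are allowed to see soft-deleted rows.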
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Quotas for instances, volumes, and floating ips.""" from nova import db from nova import exception from nova import flags FLAGS = flags.FLAGS flags.DEFINE_integer('quota_instances', 10, 'number of instances allowed per project') flags.DEFINE_integer('quota_cores', 20, 'number of instance cores allowed per project') flags.DEFINE_integer('quota_ram', 50 * 1024, 'megabytes of instance ram allowed per project') flags.DEFINE_integer('quota_volumes', 10, 'number of volumes allowed per project') flags.DEFINE_integer('quota_gigabytes', 1000, 'number of volume gigabytes allowed per project') flags.DEFINE_integer('quota_floating_ips', 10, 'number of floating ips allowed per project') flags.DEFINE_integer('quota_metadata_items', 128, 'number of metadata items allowed per instance') flags.DEFINE_integer('quota_max_injected_files', 5, 'number of injected files allowed') flags.DEFINE_integer('quota_max_injected_file_content_bytes', 10 * 1024, 'number of bytes allowed per injected file') flags.DEFINE_integer('quota_max_injected_file_path_bytes', 255, 'number of bytes allowed per injected file path') flags.DEFINE_integer('quota_security_groups', 10, 'number of security groups per project') flags.DEFINE_integer('quota_security_group_rules', 20, 'number of security rules per security group') def _get_default_quotas(): defaults = { 'instances': FLAGS.quota_instances, 'cores': FLAGS.quota_cores, 'ram': FLAGS.quota_ram, 'volumes': FLAGS.quota_volumes, 'gigabytes': FLAGS.quota_gigabytes, 'floating_ips': FLAGS.quota_floating_ips, 'metadata_items': FLAGS.quota_metadata_items, 'injected_files': FLAGS.quota_max_injected_files, 'injected_file_content_bytes': FLAGS.quota_max_injected_file_content_bytes, 'security_groups': FLAGS.quota_security_groups, 'security_group_rules': FLAGS.quota_security_group_rules, } # -1 in the quota flags means unlimited for key in defaults.keys(): if defaults[key] == -1: defaults[key] = None return defaults def get_project_quotas(context, project_id): rval = _get_default_quotas() quota = db.quota_get_all_by_project(context, project_id) for key in rval.keys(): if key in quota: rval[key] = quota[key] return rval def _get_request_allotment(requested, used, quota): if quota is None: return requested return quota - used def allowed_instances(context, requested_instances, instance_type): """Check quota and return min(requested_instances, allowed_instances).""" project_id = context.project_id context = context.elevated() requested_cores = requested_instances * instance_type['vcpus'] requested_ram = requested_instances * instance_type['memory_mb'] usage = db.instance_data_get_for_project(context, project_id) used_instances, used_cores, used_ram = usage quota = get_project_quotas(context, project_id) allowed_instances = _get_request_allotment(requested_instances, 
used_instances, quota['instances']) allowed_cores = _get_request_allotment(requested_cores, used_cores, quota['cores']) allowed_ram = _get_request_allotment(requested_ram, used_ram, quota['ram']) allowed_instances = min(allowed_instances, allowed_cores // instance_type['vcpus'], allowed_ram // instance_type['memory_mb']) return min(requested_instances, allowed_instances) def allowed_volumes(context, requested_volumes, size): """Check quota and return min(requested_volumes, allowed_volumes).""" project_id = context.project_id context = context.elevated() size = int(size) requested_gigabytes = requested_volumes * size used_volumes, used_gigabytes = db.volume_data_get_for_project(context, project_id) quota = get_project_quotas(context, project_id) allowed_volumes = _get_request_allotment(requested_volumes, used_volumes, quota['volumes']) allowed_gigabytes = _get_request_allotment(requested_gigabytes, used_gigabytes, quota['gigabytes']) if size != 0: allowed_volumes = min(allowed_volumes, int(allowed_gigabytes // size)) return min(requested_volumes, allowed_volumes) def allowed_floating_ips(context, requested_floating_ips): """Check quota and return min(requested, allowed) floating ips.""" project_id = context.project_id context = context.elevated() used_floating_ips = db.floating_ip_count_by_project(context, project_id) quota = get_project_quotas(context, project_id) allowed_floating_ips = _get_request_allotment(requested_floating_ips, used_floating_ips, quota['floating_ips']) return min(requested_floating_ips, allowed_floating_ips) def allowed_security_groups(context, requested_security_groups): """Check quota and return min(requested, allowed) security groups.""" project_id = context.project_id context = context.elevated() used_sec_groups = db.security_group_count_by_project(context, project_id) quota = get_project_quotas(context, project_id) allowed_sec_groups = _get_request_allotment(requested_security_groups, used_sec_groups, quota['security_groups']) return min(requested_security_groups, allowed_sec_groups) def allowed_security_group_rules(context, security_group_id, requested_rules): """Check quota and return min(requested, allowed) sec group rules.""" project_id = context.project_id context = context.elevated() used_rules = db.security_group_rule_count_by_group(context, security_group_id) quota = get_project_quotas(context, project_id) allowed_rules = _get_request_allotment(requested_rules, used_rules, quota['security_group_rules']) return min(requested_rules, allowed_rules) def _calculate_simple_quota(context, resource, requested): """Check quota for resource; return min(requested, allowed).""" quota = get_project_quotas(context, context.project_id) allowed = _get_request_allotment(requested, 0, quota[resource]) return min(requested, allowed) def allowed_metadata_items(context, requested_metadata_items): """Return the number of metadata items allowed.""" return _calculate_simple_quota(context, 'metadata_items', requested_metadata_items) def allowed_injected_files(context, requested_injected_files): """Return the number of injected files allowed.""" return _calculate_simple_quota(context, 'injected_files', requested_injected_files) def allowed_injected_file_content_bytes(context, requested_bytes): """Return the number of bytes allowed per injected file content.""" resource = 'injected_file_content_bytes' return _calculate_simple_quota(context, resource, requested_bytes) def allowed_injected_file_path_bytes(context): """Return the number of bytes allowed in an injected file path.""" 
return FLAGS.quota_max_injected_file_path_bytes class QuotaError(exception.ApiError): """Quota Exceeded.""" pass
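# Worked example (illustrative numbers): with the default flags above, a
# project already running 8 instances that use 16 cores and 40960 MB of RAM
# requests 4 more instances of a type with vcpus=2 and memory_mb=2048.
# allowed_instances() computes:
#
#     by instance quota:  10 - 8                  = 2
#     by core quota:     (20 - 16) // 2           = 2
#     by ram quota:      (51200 - 40960) // 2048  = 5
#
# and returns min(4, 2, 2, 5) = 2, so only two of the four requested
# instances may be created. A quota flag of -1 is translated to None by
# _get_default_quotas() and makes the corresponding term unlimited.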
# Copyright 2012 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import datetime import json import re import urllib import webob.exc from glance.api import policy import glance.api.v2 as v2 from glance.common import exception from glance.common import utils from glance.common import wsgi import glance.db import glance.notifier from glance.openstack.common import cfg import glance.openstack.common.log as logging from glance.openstack.common import timeutils import glance.schema import glance.store LOG = logging.getLogger(__name__) CONF = cfg.CONF class ImagesController(object): def __init__(self, db_api=None, policy_enforcer=None, notifier=None, store_api=None): self.db_api = db_api or glance.db.get_api() self.db_api.configure_db() self.policy = policy_enforcer or policy.Enforcer() self.notifier = notifier or glance.notifier.Notifier() self.store_api = store_api or glance.store self.store_api.create_stores() def _enforce(self, req, action): """Authorize an action against our policies""" try: self.policy.enforce(req.context, action, {}) except exception.Forbidden: raise webob.exc.HTTPForbidden() def _normalize_properties(self, image): """Convert the properties from the stored format to a dict The db api returns a list of dicts that look like {'name': <key>, 'value': <value>}, while it expects a format like {<key>: <value>} in image create and update calls. This function takes the extra step that the db api should be responsible for in the image get calls. The db api will also return deleted image properties that must be filtered out. 
""" properties = [(p['name'], p['value']) for p in image['properties'] if not p['deleted']] image['properties'] = dict(properties) return image def _extract_tags(self, image): try: #NOTE(bcwaldon): cast to set to make the list unique, then # cast back to list since that's a more sane response type return list(set(image.pop('tags'))) except KeyError: pass def _append_tags(self, context, image): image['tags'] = self.db_api.image_tag_get_all(context, image['id']) return image @utils.mutating def create(self, req, image): self._enforce(req, 'add_image') is_public = image.get('is_public') if is_public: self._enforce(req, 'publicize_image') image['owner'] = req.context.owner image['status'] = 'queued' tags = self._extract_tags(image) image = dict(self.db_api.image_create(req.context, image)) if tags is not None: self.db_api.image_tag_set_all(req.context, image['id'], tags) image['tags'] = tags else: image['tags'] = [] v2.update_image_read_acl(req, self.store_api, self.db_api, image) image = self._normalize_properties(dict(image)) self.notifier.info('image.update', image) return image def index(self, req, marker=None, limit=None, sort_key='created_at', sort_dir='desc', filters={}): self._enforce(req, 'get_images') filters['deleted'] = False #NOTE(bcwaldon): is_public=True gets public images and those # owned by the authenticated tenant result = {} filters.setdefault('is_public', True) if limit is None: limit = CONF.limit_param_default limit = min(CONF.api_limit_max, limit) try: images = self.db_api.image_get_all(req.context, filters=filters, marker=marker, limit=limit, sort_key=sort_key, sort_dir=sort_dir) if len(images) != 0 and len(images) == limit: result['next_marker'] = images[-1]['id'] except exception.InvalidFilterRangeValue as e: raise webob.exc.HTTPBadRequest(explanation=unicode(e)) except exception.InvalidSortKey as e: raise webob.exc.HTTPBadRequest(explanation=unicode(e)) except exception.NotFound as e: raise webob.exc.HTTPBadRequest(explanation=unicode(e)) images = [self._normalize_properties(dict(image)) for image in images] result['images'] = [self._append_tags(req.context, image) for image in images] return result def _get_image(self, context, image_id): try: return self.db_api.image_get(context, image_id) except (exception.NotFound, exception.Forbidden): raise webob.exc.HTTPNotFound() def show(self, req, image_id): self._enforce(req, 'get_image') image = self._get_image(req.context, image_id) image = self._normalize_properties(dict(image)) return self._append_tags(req.context, image) @utils.mutating def update(self, req, image_id, changes): self._enforce(req, 'modify_image') context = req.context try: image = self.db_api.image_get(context, image_id) except (exception.NotFound, exception.Forbidden): msg = ("Failed to find image %(image_id)s to update" % locals()) LOG.info(msg) raise webob.exc.HTTPNotFound(explanation=msg) image = self._normalize_properties(dict(image)) updates = self._extract_updates(req, image, changes) tags = None if len(updates) > 0: tags = self._extract_tags(updates) purge_props = 'properties' in updates try: image = self.db_api.image_update(context, image_id, updates, purge_props) except (exception.NotFound, exception.Forbidden): raise webob.exc.HTTPNotFound() image = self._normalize_properties(dict(image)) v2.update_image_read_acl(req, self.store_api, self.db_api, image) if tags is not None: self.db_api.image_tag_set_all(req.context, image_id, tags) image['tags'] = tags else: self._append_tags(req.context, image) self.notifier.info('image.update', image) 
return image def _extract_updates(self, req, image, changes): """ Determine the updates to pass to the database api. Given the current image, convert a list of changes to be made into the corresponding update dictionary that should be passed to db_api.image_update. Changes have the following parts op - 'add' a new attribute, 'replace' an existing attribute, or 'remove' an existing attribute. path - A list of path parts for determining which attribute the the operation applies to. value - For 'add' and 'replace', the new value the attribute should assume. For the current use case, there are two types of valid paths. For base attributes (fields stored directly on the Image object) the path must take the form ['<attribute name>']. These attributes are always present so the only valid operation on them is 'replace'. For image properties, the path takes the form ['properties', '<property name>'] and all operations are valid. Future refactoring should simplify this code by hardening the image abstraction such that database details such as how image properties are stored do not have any influence here. """ updates = {} property_updates = image['properties'] for change in changes: path = change['path'] if len(path) == 1: assert change['op'] == 'replace' key = change['path'][0] if key == 'is_public' and change['value']: self._enforce(req, 'publicize_image') updates[key] = change['value'] else: assert len(path) == 2 assert path[0] == 'properties' update_method_name = '_do_%s_property' % change['op'] assert hasattr(self, update_method_name) update_method = getattr(self, update_method_name) update_method(property_updates, change) updates['properties'] = property_updates return updates def _do_replace_property(self, updates, change): """ Replace a single image property, ensuring it's present. """ key = change['path'][1] if key not in updates: msg = _("Property %s does not exist.") raise webob.exc.HTTPConflict(msg % key) updates[key] = change['value'] def _do_add_property(self, updates, change): """ Add a new image property, ensuring it does not already exist. """ key = change['path'][1] if key in updates: msg = _("Property %s already present.") raise webob.exc.HTTPConflict(msg % key) updates[key] = change['value'] def _do_remove_property(self, updates, change): """ Remove an image property, ensuring it's present. 
""" key = change['path'][1] if key not in updates: msg = _("Property %s does not exist.") raise webob.exc.HTTPConflict(msg % key) del updates[key] @utils.mutating def delete(self, req, image_id): self._enforce(req, 'delete_image') image = self._get_image(req.context, image_id) if image['protected']: msg = _("Unable to delete as image %(image_id)s is protected" % locals()) raise webob.exc.HTTPForbidden(explanation=msg) status = 'deleted' if image['location']: if CONF.delayed_delete: status = 'pending_delete' self.store_api.schedule_delayed_delete_from_backend( image['location'], id) else: self.store_api.safe_delete_from_backend(image['location'], req.context, id) try: self.db_api.image_update(req.context, image_id, {'status': status}) self.db_api.image_destroy(req.context, image_id) except (exception.NotFound, exception.Forbidden): msg = ("Failed to find image %(image_id)s to delete" % locals()) LOG.info(msg) raise webob.exc.HTTPNotFound() else: self.notifier.info('image.delete', image) class RequestDeserializer(wsgi.JSONRequestDeserializer): _readonly_properties = ['created_at', 'updated_at', 'status', 'checksum', 'size', 'direct_url', 'self', 'file', 'schema'] _reserved_properties = ['owner', 'is_public', 'location', 'deleted', 'deleted_at'] _base_properties = ['checksum', 'created_at', 'container_format', 'disk_format', 'id', 'min_disk', 'min_ram', 'name', 'size', 'status', 'tags', 'updated_at', 'visibility', 'protected'] def __init__(self, schema=None): super(RequestDeserializer, self).__init__() self.schema = schema or get_schema() def _parse_image(self, request): body = self._get_request_body(request) try: self.schema.validate(body) except exception.InvalidObject as e: raise webob.exc.HTTPBadRequest(explanation=unicode(e)) # Ensure all specified properties are allowed self._check_readonly(body) self._check_reserved(body) # Create a dict of base image properties, with user- and deployer- # defined properties contained in a 'properties' dictionary image = {'properties': body} for key in self._base_properties: try: image[key] = image['properties'].pop(key) except KeyError: pass if 'visibility' in image: image['is_public'] = image.pop('visibility') == 'public' return {'image': image} def _get_request_body(self, request): output = super(RequestDeserializer, self).default(request) if not 'body' in output: msg = _('Body expected in request.') raise webob.exc.HTTPBadRequest(explanation=msg) return output['body'] @classmethod def _check_readonly(cls, image): for key in cls._readonly_properties: if key in image: msg = "Attribute \'%s\' is read-only." % key raise webob.exc.HTTPForbidden(explanation=unicode(msg)) @classmethod def _check_reserved(cls, image): for key in cls._reserved_properties: if key in image: msg = "Attribute \'%s\' is reserved." % key raise webob.exc.HTTPForbidden(explanation=unicode(msg)) def create(self, request): return self._parse_image(request) def _get_change_operation(self, raw_change): op = None for key in ['replace', 'add', 'remove']: if key in raw_change: if op is not None: msg = _('Operation objects must contain only one member' ' named "add", "remove", or "replace".') raise webob.exc.HTTPBadRequest(explanation=msg) op = key if op is None: msg = _('Operation objects must contain exactly one member' ' named "add", "remove", or "replace".') raise webob.exc.HTTPBadRequest(explanation=msg) return op def _get_change_path(self, raw_change, op): key = self._decode_json_pointer(raw_change[op]) if key in self._readonly_properties: msg = "Attribute \'%s\' is read-only." 
% key raise webob.exc.HTTPForbidden(explanation=unicode(msg)) if key in self._reserved_properties: msg = "Attribute \'%s\' is reserved." % key raise webob.exc.HTTPForbidden(explanation=unicode(msg)) # For image properties, we need to put "properties" at the beginning if key not in self._base_properties: return ['properties', key] return [key] def _decode_json_pointer(self, pointer): """ Parse a json pointer. Json Pointers are defined in http://tools.ietf.org/html/draft-pbryan-zyp-json-pointer . The pointers use '/' for separation between object attributes, such that '/A/B' would evaluate to C in {"A": {"B": "C"}}. A '/' character in an attribute name is encoded as "~1" and a '~' character is encoded as "~0". """ self._validate_json_pointer(pointer) return pointer.lstrip('/').replace('~1', '/').replace('~0', '~') def _validate_json_pointer(self, pointer): """ Validate a json pointer. We only accept a limited form of json pointers. Specifically, we do not allow multiple levels of indirection, so there can only be one '/' in the pointer, located at the start of the string. """ if not pointer.startswith('/'): msg = _('Pointer `%s` does not start with "/".') % pointer raise webob.exc.HTTPBadRequest(explanation=msg) if '/' in pointer[1:]: msg = _('Pointer `%s` contains more than one "/".') % pointer raise webob.exc.HTTPBadRequest(explanation=msg) if re.search('~[^01]', pointer): msg = _('Pointer `%s` contains "~" not part of' ' a recognized escape sequence.') % pointer raise webob.exc.HTTPBadRequest(explanation=msg) def _get_change_value(self, raw_change, op): if 'value' not in raw_change: msg = _('Operation "%s" requires a member named "value".') raise webob.exc.HTTPBadRequest(explanation=msg % op) return raw_change['value'] def _validate_change(self, change): if change['op'] == 'remove': return partial_image = {change['path'][-1]: change['value']} try: self.schema.validate(partial_image) except exception.InvalidObject as e: raise webob.exc.HTTPBadRequest(explanation=unicode(e)) def update(self, request): changes = [] valid_content_types = [ 'application/openstack-images-v2.0-json-patch' ] if request.content_type not in valid_content_types: headers = {'Accept-Patch': ','.join(valid_content_types)} raise webob.exc.HTTPUnsupportedMediaType(headers=headers) body = self._get_request_body(request) if not isinstance(body, list): msg = _('Request body must be a JSON array of operation objects.') raise webob.exc.HTTPBadRequest(explanation=msg) for raw_change in body: if not isinstance(raw_change, dict): msg = _('Operations must be JSON objects.') raise webob.exc.HTTPBadRequest(explanation=msg) op = self._get_change_operation(raw_change) path = self._get_change_path(raw_change, op) change = {'op': op, 'path': path} if not op == 'remove': change['value'] = self._get_change_value(raw_change, op) self._validate_change(change) if change['path'] == ['visibility']: change['path'] = ['is_public'] change['value'] = change['value'] == 'public' changes.append(change) return {'changes': changes} def _validate_limit(self, limit): try: limit = int(limit) except ValueError: msg = _("limit param must be an integer") raise webob.exc.HTTPBadRequest(explanation=msg) if limit < 0: msg = _("limit param must be positive") raise webob.exc.HTTPBadRequest(explanation=msg) return limit def _validate_sort_dir(self, sort_dir): if sort_dir not in ['asc', 'desc']: msg = _('Invalid sort direction: %s') % sort_dir raise webob.exc.HTTPBadRequest(explanation=msg) return sort_dir def _get_filters(self, filters): visibility =
filters.pop('visibility', None) if visibility: if visibility in ['public', 'private']: filters['is_public'] = visibility == 'public' else: msg = _('Invalid visibility value: %s') % visibility raise webob.exc.HTTPBadRequest(explanation=msg) return filters def index(self, request): params = request.params.copy() limit = params.pop('limit', None) marker = params.pop('marker', None) sort_dir = params.pop('sort_dir', 'desc') query_params = { 'sort_key': params.pop('sort_key', 'created_at'), 'sort_dir': self._validate_sort_dir(sort_dir), 'filters': self._get_filters(params), } if marker is not None: query_params['marker'] = marker if limit is not None: query_params['limit'] = self._validate_limit(limit) return query_params class ResponseSerializer(wsgi.JSONResponseSerializer): def __init__(self, schema=None): super(ResponseSerializer, self).__init__() self.schema = schema or get_schema() def _get_image_href(self, image, subcollection=''): base_href = '/v2/images/%s' % image['id'] if subcollection: base_href = '%s/%s' % (base_href, subcollection) return base_href def _get_image_links(self, image): return [ {'rel': 'self', 'href': self._get_image_href(image)}, {'rel': 'file', 'href': self._get_image_href(image, 'file')}, {'rel': 'describedby', 'href': '/v2/schemas/image'}, ] def _format_image(self, image): #NOTE(bcwaldon): merge the contained properties dict with the # top-level image object image_view = image['properties'] attributes = ['id', 'name', 'disk_format', 'container_format', 'size', 'status', 'checksum', 'tags', 'protected', 'created_at', 'updated_at', 'min_ram', 'min_disk'] for key in attributes: image_view[key] = image[key] location = image['location'] if CONF.show_image_direct_url and location is not None: image_view['direct_url'] = location visibility = 'public' if image['is_public'] else 'private' image_view['visibility'] = visibility image_view['self'] = self._get_image_href(image) image_view['file'] = self._get_image_href(image, 'file') image_view['schema'] = '/v2/schemas/image' self._serialize_datetimes(image_view) image_view = self.schema.filter(image_view) return image_view @staticmethod def _serialize_datetimes(image): for (key, value) in image.iteritems(): if isinstance(value, datetime.datetime): image[key] = timeutils.isotime(value) def create(self, response, image): response.status_int = 201 body = json.dumps(self._format_image(image), ensure_ascii=False) response.unicode_body = unicode(body) response.content_type = 'application/json' response.location = self._get_image_href(image) def show(self, response, image): body = json.dumps(self._format_image(image), ensure_ascii=False) response.unicode_body = unicode(body) response.content_type = 'application/json' def update(self, response, image): body = json.dumps(self._format_image(image), ensure_ascii=False) response.unicode_body = unicode(body) response.content_type = 'application/json' def index(self, response, result): params = dict(response.request.params) params.pop('marker', None) query = urllib.urlencode(params) body = { 'images': [self._format_image(i) for i in result['images']], 'first': '/v2/images', 'schema': '/v2/schemas/images', } if query: body['first'] = '%s?%s' % (body['first'], query) if 'next_marker' in result: params['marker'] = result['next_marker'] next_query = urllib.urlencode(params) body['next'] = '/v2/images?%s' % next_query response.unicode_body = unicode(json.dumps(body, ensure_ascii=False)) response.content_type = 'application/json' def delete(self, response, result): response.status_int = 204 
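# Illustrative sketch (made-up values): for a request to /v2/images?limit=2
# whose result page is full, index() above echoes the query parameters in
# the "first" link and appends the page's next_marker to build the "next"
# link:
#
#     {"images": [...],
#      "first": "/v2/images?limit=2",
#      "next": "/v2/images?limit=2&marker=<id-of-last-image>",
#      "schema": "/v2/schemas/images"}
#
# Any "marker" supplied by the client is dropped before the links are
# rebuilt, so "first" always points at the start of the listing.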
_BASE_PROPERTIES = { 'id': { 'type': 'string', 'description': 'An identifier for the image', 'pattern': ('^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}' '-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$'), }, 'name': { 'type': 'string', 'description': 'Descriptive name for the image', 'maxLength': 255, }, 'status': { 'type': 'string', 'description': 'Status of the image', 'enum': ['queued', 'saving', 'active', 'killed', 'deleted', 'pending_delete'], }, 'visibility': { 'type': 'string', 'description': 'Scope of image accessibility', 'enum': ['public', 'private'], }, 'protected': { 'type': 'boolean', 'description': 'If true, image will not be deletable.', }, 'checksum': { 'type': 'string', 'description': 'md5 hash of image contents.', 'maxLength': 32, }, 'size': { 'type': 'integer', 'description': 'Size of image file in bytes', }, 'container_format': { 'type': 'string', 'description': 'Format of the container', 'enum': ['bare', 'ovf', 'ami', 'aki', 'ari'], }, 'disk_format': { 'type': 'string', 'description': 'Format of the disk', 'enum': ['raw', 'vhd', 'vmdk', 'vdi', 'iso', 'qcow2', 'aki', 'ari', 'ami'], }, 'created_at': { 'type': 'string', 'description': 'Date and time of image registration', #TODO(bcwaldon): our jsonschema library doesn't seem to like the # format attribute, figure out why! #'format': 'date-time', }, 'updated_at': { 'type': 'string', 'description': 'Date and time of the last image modification', #'format': 'date-time', }, 'tags': { 'type': 'array', 'description': 'List of strings related to the image', 'items': { 'type': 'string', 'maxLength': 255, }, }, 'direct_url': { 'type': 'string', 'description': 'URL to access the image file kept in external store', }, 'min_ram': { 'type': 'integer', 'description': 'Amount of ram (in MB) required to boot image.', }, 'min_disk': { 'type': 'integer', 'description': 'Amount of disk space (in GB) required to boot image.', }, 'self': {'type': 'string'}, 'file': {'type': 'string'}, 'schema': {'type': 'string'}, } _BASE_LINKS = [ {'rel': 'self', 'href': '{self}'}, {'rel': 'enclosure', 'href': '{file}'}, {'rel': 'describedby', 'href': '{schema}'}, ] def get_schema(custom_properties=None): properties = copy.deepcopy(_BASE_PROPERTIES) links = copy.deepcopy(_BASE_LINKS) if CONF.allow_additional_image_properties: schema = glance.schema.PermissiveSchema('image', properties, links) else: schema = glance.schema.Schema('image', properties) schema.merge_properties(custom_properties or {}) return schema def get_collection_schema(custom_properties=None): image_schema = get_schema(custom_properties) return glance.schema.CollectionSchema('images', image_schema) def load_custom_properties(): """Find the schema properties files and load them into a dict.""" filename = 'schema-image.json' match = CONF.find_file(filename) if match: schema_file = open(match) schema_data = schema_file.read() return json.loads(schema_data) else: msg = _('Could not find schema properties file %s. Continuing ' 'without custom properties') LOG.warn(msg % filename) return {} def create_resource(custom_properties=None): """Images resource factory method""" schema = get_schema(custom_properties) deserializer = RequestDeserializer(schema) serializer = ResponseSerializer(schema) controller = ImagesController() return wsgi.Resource(controller, deserializer, serializer)
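# Illustrative request sketch (image id and property names are made up):
# RequestDeserializer.update() consumes a draft JSON-patch media type in
# which the operation name is the member key and its value is a JSON
# pointer to the attribute being changed:
#
#     PATCH /v2/images/<image_id>
#     Content-Type: application/openstack-images-v2.0-json-patch
#
#     [{"replace": "/name", "value": "fedora-17"},
#      {"add": "/os~1distro", "value": "fedora"},
#      {"remove": "/obsolete_key"}]
#
# Pointers escape '/' as '~1' and '~' as '~0', so "/os~1distro" addresses a
# custom property literally named "os/distro". A "value" member is required
# for add and replace (400 otherwise), and read-only or reserved paths such
# as /status and /owner are rejected with 403.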
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid import routes from keystone import catalog from keystone import exception from keystone import identity from keystone import policy from keystone import token from keystone.common import logging from keystone.common import utils from keystone.common import wsgi LOG = logging.getLogger(__name__) class AdminRouter(wsgi.ComposingRouter): def __init__(self): mapper = routes.Mapper() version_controller = VersionController('admin') mapper.connect('/', controller=version_controller, action='get_version') # Token Operations auth_controller = TokenController() mapper.connect('/tokens', controller=auth_controller, action='authenticate', conditions=dict(method=['POST'])) mapper.connect('/tokens/{token_id}', controller=auth_controller, action='validate_token', conditions=dict(method=['GET'])) mapper.connect('/tokens/{token_id}', controller=auth_controller, action='validate_token_head', conditions=dict(method=['HEAD'])) mapper.connect('/tokens/{token_id}', controller=auth_controller, action='delete_token', conditions=dict(method=['DELETE'])) mapper.connect('/tokens/{token_id}/endpoints', controller=auth_controller, action='endpoints', conditions=dict(method=['GET'])) # Miscellaneous Operations extensions_controller = AdminExtensionsController() mapper.connect('/extensions', controller=extensions_controller, action='get_extensions_info', conditions=dict(method=['GET'])) mapper.connect('/extensions/{extension_alias}', controller=extensions_controller, action='get_extension_info', conditions=dict(method=['GET'])) identity_router = identity.AdminRouter() routers = [identity_router] super(AdminRouter, self).__init__(mapper, routers) class PublicRouter(wsgi.ComposingRouter): def __init__(self): mapper = routes.Mapper() version_controller = VersionController('public') mapper.connect('/', controller=version_controller, action='get_version') # Token Operations auth_controller = TokenController() mapper.connect('/tokens', controller=auth_controller, action='authenticate', conditions=dict(method=['POST'])) # Miscellaneous extensions_controller = PublicExtensionsController() mapper.connect('/extensions', controller=extensions_controller, action='get_extensions_info', conditions=dict(method=['GET'])) mapper.connect('/extensions/{extension_alias}', controller=extensions_controller, action='get_extension_info', conditions=dict(method=['GET'])) identity_router = identity.PublicRouter() routers = [identity_router] super(PublicRouter, self).__init__(mapper, routers) class PublicVersionRouter(wsgi.ComposingRouter): def __init__(self): mapper = routes.Mapper() version_controller = VersionController('public') mapper.connect('/', controller=version_controller, action='get_versions') routers = [] super(PublicVersionRouter, self).__init__(mapper, routers) class AdminVersionRouter(wsgi.ComposingRouter): def __init__(self): mapper = routes.Mapper() version_controller = VersionController('admin') 
mapper.connect('/', controller=version_controller, action='get_versions') routers = [] super(AdminVersionRouter, self).__init__(mapper, routers) class VersionController(wsgi.Application): def __init__(self, version_type): self.catalog_api = catalog.Manager() self.url_key = "%sURL" % version_type super(VersionController, self).__init__() def _get_identity_url(self, context): catalog_ref = self.catalog_api.get_catalog( context=context, user_id=None, tenant_id=None) for region, region_ref in catalog_ref.iteritems(): for service, service_ref in region_ref.iteritems(): if service == 'identity': return service_ref[self.url_key] raise exception.NotImplemented() def _get_versions_list(self, context): """The list of versions is dependent on the context.""" identity_url = self._get_identity_url(context) if not identity_url.endswith('/'): identity_url = identity_url + '/' versions = {} versions['v2.0'] = { "id": "v2.0", "status": "beta", "updated": "2011-11-19T00:00:00Z", "links": [ { "rel": "self", "href": identity_url, }, { "rel": "describedby", "type": "text/html", "href": "http://docs.openstack.org/api/openstack-" "identity-service/2.0/content/" }, { "rel": "describedby", "type": "application/pdf", "href": "http://docs.openstack.org/api/openstack-" "identity-service/2.0/identity-dev-guide-" "2.0.pdf" } ], "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.identity-v2.0" "+json" }, { "base": "application/xml", "type": "application/vnd.openstack.identity-v2.0" "+xml" } ] } return versions def get_versions(self, context): versions = self._get_versions_list(context) return wsgi.render_response(status=(300, 'Multiple Choices'), body={ "versions": { "values": versions.values() } }) def get_version(self, context): versions = self._get_versions_list(context) return wsgi.render_response(body={ "version": versions['v2.0'] }) class NoopController(wsgi.Application): def __init__(self): super(NoopController, self).__init__() def noop(self, context): return {} class TokenController(wsgi.Application): def __init__(self): self.catalog_api = catalog.Manager() self.identity_api = identity.Manager() self.token_api = token.Manager() self.policy_api = policy.Manager() super(TokenController, self).__init__() def authenticate(self, context, auth=None): """Authenticate credentials and return a token. Accept auth as a dict that looks like:: { "auth":{ "passwordCredentials":{ "username":"test_user", "password":"mypass" }, "tenantName":"customer-x" } } In this case, tenant is optional, if not provided the token will be considered "unscoped" and can later be used to get a scoped token. Alternatively, this call accepts auth with only a token and tenant that will return a token that is scoped to that tenant. 
""" token_id = uuid.uuid4().hex if 'passwordCredentials' in auth: username = auth['passwordCredentials'].get('username', '') password = auth['passwordCredentials'].get('password', '') tenant_name = auth.get('tenantName', None) user_id = auth['passwordCredentials'].get('userId', None) if username: user_ref = self.identity_api.get_user_by_name( context=context, user_name=username) if user_ref: user_id = user_ref['id'] # more compat tenant_id = auth.get('tenantId', None) if tenant_name: tenant_ref = self.identity_api.get_tenant_by_name( context=context, tenant_name=tenant_name) if tenant_ref: tenant_id = tenant_ref['id'] try: auth_info = self.identity_api.authenticate(context=context, user_id=user_id, password=password, tenant_id=tenant_id) (user_ref, tenant_ref, metadata_ref) = auth_info # If the user is disabled don't allow them to authenticate if not user_ref.get('enabled', True): LOG.warning('User %s is disabled' % user_id) raise exception.Unauthorized() except AssertionError as e: raise exception.Unauthorized(e.message) token_ref = self.token_api.create_token( context, token_id, dict(id=token_id, user=user_ref, tenant=tenant_ref, metadata=metadata_ref)) if tenant_ref: catalog_ref = self.catalog_api.get_catalog( context=context, user_id=user_ref['id'], tenant_id=tenant_ref['id'], metadata=metadata_ref) else: catalog_ref = {} elif 'token' in auth: token = auth['token'].get('id', None) tenant_name = auth.get('tenantName') # more compat if tenant_name: tenant_ref = self.identity_api.get_tenant_by_name( context=context, tenant_name=tenant_name) tenant_id = tenant_ref['id'] else: tenant_id = auth.get('tenantId', None) try: old_token_ref = self.token_api.get_token(context=context, token_id=token) except exception.NotFound: raise exception.Unauthorized() user_ref = old_token_ref['user'] # If the user is disabled don't allow them to authenticate current_user_ref = self.identity_api.get_user( context=context, user_id=user_ref['id']) if not current_user_ref.get('enabled', True): LOG.warning('User %s is disabled' % user_ref['id']) raise exception.Unauthorized() tenants = self.identity_api.get_tenants_for_user(context, user_ref['id']) if tenant_id: assert tenant_id in tenants tenant_ref = self.identity_api.get_tenant(context=context, tenant_id=tenant_id) if tenant_ref: metadata_ref = self.identity_api.get_metadata( context=context, user_id=user_ref['id'], tenant_id=tenant_ref['id']) catalog_ref = self.catalog_api.get_catalog( context=context, user_id=user_ref['id'], tenant_id=tenant_ref['id'], metadata=metadata_ref) else: metadata_ref = {} catalog_ref = {} token_ref = self.token_api.create_token( context, token_id, dict(id=token_id, user=user_ref, tenant=tenant_ref, metadata=metadata_ref, expires=old_token_ref['expires'])) # TODO(termie): optimize this call at some point and put it into the # the return for metadata # fill out the roles in the metadata roles_ref = [] for role_id in metadata_ref.get('roles', []): roles_ref.append(self.identity_api.get_role(context, role_id)) logging.debug('TOKEN_REF %s', token_ref) return self._format_authenticate(token_ref, roles_ref, catalog_ref) def _get_token_ref(self, context, token_id, belongs_to=None): """Returns a token if a valid one exists. Optionally, limited to a token owned by a specific tenant. 
""" # TODO(termie): this stuff should probably be moved to middleware self.assert_admin(context) token_ref = self.token_api.get_token(context=context, token_id=token_id) if belongs_to: assert token_ref['tenant']['id'] == belongs_to return token_ref # admin only def validate_token_head(self, context, token_id): """Check that a token is valid. Optionally, also ensure that it is owned by a specific tenant. Identical to ``validate_token``, except does not return a response. """ belongs_to = context['query_string'].get("belongsTo") assert self._get_token_ref(context, token_id, belongs_to) # admin only def validate_token(self, context, token_id): """Check that a token is valid. Optionally, also ensure that it is owned by a specific tenant. Returns metadata about the token along any associated roles. """ belongs_to = context['query_string'].get("belongsTo") token_ref = self._get_token_ref(context, token_id, belongs_to) # TODO(termie): optimize this call at some point and put it into the # the return for metadata # fill out the roles in the metadata metadata_ref = token_ref['metadata'] roles_ref = [] for role_id in metadata_ref.get('roles', []): roles_ref.append(self.identity_api.get_role(context, role_id)) # Get a service catalog if belongs_to is not none # This is needed for on-behalf-of requests catalog_ref = None if belongs_to is not None: catalog_ref = self.catalog_api.get_catalog( context=context, user_id=token_ref['user']['id'], tenant_id=token_ref['tenant']['id'], metadata=metadata_ref) return self._format_token(token_ref, roles_ref, catalog_ref) def delete_token(self, context, token_id): """Delete a token, effectively invalidating it for authz.""" # TODO(termie): this stuff should probably be moved to middleware self.assert_admin(context) self.token_api.delete_token(context=context, token_id=token_id) def endpoints(self, context, token_id): """Return a list of endpoints available to the token.""" raise exception.NotImplemented() def _format_authenticate(self, token_ref, roles_ref, catalog_ref): o = self._format_token(token_ref, roles_ref) o['access']['serviceCatalog'] = self._format_catalog(catalog_ref) return o def _format_token(self, token_ref, roles_ref, catalog_ref=None): user_ref = token_ref['user'] metadata_ref = token_ref['metadata'] expires = token_ref['expires'] if expires is not None: expires = utils.isotime(expires) o = {'access': {'token': {'id': token_ref['id'], 'expires': expires, }, 'user': {'id': user_ref['id'], 'name': user_ref['name'], 'username': user_ref['name'], 'roles': roles_ref, 'roles_links': metadata_ref.get('roles_links', []) } } } if 'tenant' in token_ref and token_ref['tenant']: token_ref['tenant']['enabled'] = True o['access']['token']['tenant'] = token_ref['tenant'] if catalog_ref is not None: o['access']['serviceCatalog'] = self._format_catalog(catalog_ref) return o def _format_catalog(self, catalog_ref): """Munge catalogs from internal to output format Internal catalogs look like: {$REGION: { {$SERVICE: { $key1: $value1, ... } } } The legacy api wants them to look like [{'name': $SERVICE[name], 'type': $SERVICE, 'endpoints': [{ 'tenantId': $tenant_id, ... 
'region': $REGION, }], 'endpoints_links': [], }] """ if not catalog_ref: return {} services = {} for region, region_ref in catalog_ref.iteritems(): for service, service_ref in region_ref.iteritems(): new_service_ref = services.get(service, {}) new_service_ref['name'] = service_ref.pop('name') new_service_ref['type'] = service new_service_ref['endpoints_links'] = [] service_ref['region'] = region endpoints_ref = new_service_ref.get('endpoints', []) endpoints_ref.append(service_ref) new_service_ref['endpoints'] = endpoints_ref services[service] = new_service_ref return services.values() class ExtensionsController(wsgi.Application): """Base extensions controller to be extended by public and admin API's.""" def __init__(self, extensions=None): super(ExtensionsController, self).__init__() self.extensions = extensions or {} def get_extensions_info(self, context): return {'extensions': {'values': self.extensions.values()}} def get_extension_info(self, context, extension_alias): try: return {'extension': self.extensions[extension_alias]} except KeyError: raise exception.NotFound(target=extension_alias) class PublicExtensionsController(ExtensionsController): pass class AdminExtensionsController(ExtensionsController): def __init__(self, *args, **kwargs): super(AdminExtensionsController, self).__init__(*args, **kwargs) # TODO(dolph): Extensions should obviously provide this information # themselves, but hardcoding it here allows us to match # the API spec in the short term with minimal complexity. self.extensions['OS-KSADM'] = { 'name': 'Openstack Keystone Admin', 'namespace': 'http://docs.openstack.org/identity/api/ext/' 'OS-KSADM/v1.0', 'alias': 'OS-KSADM', 'updated': '2011-08-19T13:25:27-06:00', 'description': 'Openstack extensions to Keystone v2.0 API ' 'enabling Admin Operations.', 'links': [ { 'rel': 'describedby', # TODO(dolph): link needs to be revised after # bug 928059 merges 'type': 'text/html', 'href': ('https://github.com/openstack/' 'identity-api'), } ] } @logging.fail_gracefully def public_app_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) return PublicRouter() @logging.fail_gracefully def admin_app_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) return AdminRouter() @logging.fail_gracefully def public_version_app_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) return PublicVersionRouter() @logging.fail_gracefully def admin_version_app_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) return AdminVersionRouter()
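

# ---------------------------------------------------------------------------
# A minimal usage sketch (not part of the service module above): the two
# request-body shapes that TokenController.authenticate() accepts, per its
# docstring. All credential and token values below are hypothetical
# placeholder test data.

def _example_authenticate_bodies():
    """Build sample 'auth' payloads for password- and token-based requests."""
    # Password credentials, optionally scoped to a tenant by name.
    password_auth = {
        'auth': {
            'passwordCredentials': {
                'username': 'test_user',
                'password': 'mypass',
            },
            'tenantName': 'customer-x',
        },
    }
    # Re-scoping an existing (unscoped) token to a tenant.
    token_auth = {
        'auth': {
            'token': {'id': 'replace-with-a-real-token-id'},
            'tenantName': 'customer-x',
        },
    }
    return password_auth, token_auth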
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Defines interface for DB access. The underlying driver is loaded as a :class:`LazyPluggable`. **Related Flags** :db_backend: string to lookup in the list of LazyPluggable backends. `sqlalchemy` is the only supported backend right now. :sql_connection: string specifying the sqlalchemy connection to use, like: `sqlite:///var/lib/nova/nova.sqlite`. :enable_new_services: when adding a new service to the database, is it in the pool of available hardware (Default: True) """ from nova import exception from nova import flags from nova import utils FLAGS = flags.FLAGS flags.DEFINE_string('db_backend', 'sqlalchemy', 'The backend to use for db') flags.DEFINE_boolean('enable_new_services', True, 'Services to be added to the available pool on create') flags.DEFINE_string('instance_name_template', 'instance-%08x', 'Template string to be used to generate instance names') flags.DEFINE_string('volume_name_template', 'volume-%08x', 'Template string to be used to generate instance names') flags.DEFINE_string('snapshot_name_template', 'snapshot-%08x', 'Template string to be used to generate snapshot names') flags.DEFINE_string('vsa_name_template', 'vsa-%08x', 'Template string to be used to generate VSA names') IMPL = utils.LazyPluggable(FLAGS['db_backend'], sqlalchemy='nova.db.sqlalchemy.api') class NoMoreBlades(exception.Error): """No more available blades.""" pass class NoMoreNetworks(exception.Error): """No more available networks.""" pass class NoMoreTargets(exception.Error): """No more available blades""" pass ################### def service_destroy(context, instance_id): """Destroy the service or raise if it does not exist.""" return IMPL.service_destroy(context, instance_id) def service_get(context, service_id): """Get a service or raise if it does not exist.""" return IMPL.service_get(context, service_id) def service_get_by_host_and_topic(context, host, topic): """Get a service by host it's on and topic it listens to.""" return IMPL.service_get_by_host_and_topic(context, host, topic) def service_get_all(context, disabled=None): """Get all services.""" return IMPL.service_get_all(context, disabled) def service_get_all_by_topic(context, topic): """Get all services for a given topic.""" return IMPL.service_get_all_by_topic(context, topic) def service_get_all_by_host(context, host): """Get all services for a given host.""" return IMPL.service_get_all_by_host(context, host) def service_get_all_compute_by_host(context, host): """Get all compute services for a given host.""" return IMPL.service_get_all_compute_by_host(context, host) def service_get_all_compute_sorted(context): """Get all compute services sorted by instance count. :returns: a list of (Service, instance_count) tuples. 
""" return IMPL.service_get_all_compute_sorted(context) def service_get_all_network_sorted(context): """Get all network services sorted by network count. :returns: a list of (Service, network_count) tuples. """ return IMPL.service_get_all_network_sorted(context) def service_get_all_volume_sorted(context): """Get all volume services sorted by volume count. :returns: a list of (Service, volume_count) tuples. """ return IMPL.service_get_all_volume_sorted(context) def service_get_by_args(context, host, binary): """Get the state of an service by node name and binary.""" return IMPL.service_get_by_args(context, host, binary) def service_create(context, values): """Create a service from the values dictionary.""" return IMPL.service_create(context, values) def service_update(context, service_id, values): """Set the given properties on an service and update it. Raises NotFound if service does not exist. """ return IMPL.service_update(context, service_id, values) ################### def compute_node_get(context, compute_id, session=None): """Get an computeNode or raise if it does not exist.""" return IMPL.compute_node_get(context, compute_id) def compute_node_create(context, values): """Create a computeNode from the values dictionary.""" return IMPL.compute_node_create(context, values) def compute_node_update(context, compute_id, values): """Set the given properties on an computeNode and update it. Raises NotFound if computeNode does not exist. """ return IMPL.compute_node_update(context, compute_id, values) ################### def certificate_create(context, values): """Create a certificate from the values dictionary.""" return IMPL.certificate_create(context, values) def certificate_destroy(context, certificate_id): """Destroy the certificate or raise if it does not exist.""" return IMPL.certificate_destroy(context, certificate_id) def certificate_get_all_by_project(context, project_id): """Get all certificates for a project.""" return IMPL.certificate_get_all_by_project(context, project_id) def certificate_get_all_by_user(context, user_id): """Get all certificates for a user.""" return IMPL.certificate_get_all_by_user(context, user_id) def certificate_get_all_by_user_and_project(context, user_id, project_id): """Get all certificates for a user and project.""" return IMPL.certificate_get_all_by_user_and_project(context, user_id, project_id) def certificate_update(context, certificate_id, values): """Set the given properties on an certificate and update it. Raises NotFound if service does not exist. """ return IMPL.certificate_update(context, certificate_id, values) ################### def floating_ip_get(context, id): return IMPL.floating_ip_get(context, id) def floating_ip_allocate_address(context, project_id): """Allocate free floating ip and return the address. Raises if one is not available. 
""" return IMPL.floating_ip_allocate_address(context, project_id) def floating_ip_create(context, values): """Create a floating ip from the values dictionary.""" return IMPL.floating_ip_create(context, values) def floating_ip_count_by_project(context, project_id): """Count floating ips used by project.""" return IMPL.floating_ip_count_by_project(context, project_id) def floating_ip_deallocate(context, address): """Deallocate an floating ip by address.""" return IMPL.floating_ip_deallocate(context, address) def floating_ip_destroy(context, address): """Destroy the floating_ip or raise if it does not exist.""" return IMPL.floating_ip_destroy(context, address) def floating_ip_disassociate(context, address): """Disassociate an floating ip from a fixed ip by address. :returns: the address of the existing fixed ip. """ return IMPL.floating_ip_disassociate(context, address) def floating_ip_fixed_ip_associate(context, floating_address, fixed_address, host): """Associate an floating ip to a fixed_ip by address.""" return IMPL.floating_ip_fixed_ip_associate(context, floating_address, fixed_address, host) def floating_ip_get_all(context): """Get all floating ips.""" return IMPL.floating_ip_get_all(context) def floating_ip_get_all_by_host(context, host): """Get all floating ips by host.""" return IMPL.floating_ip_get_all_by_host(context, host) def floating_ip_get_all_by_project(context, project_id): """Get all floating ips by project.""" return IMPL.floating_ip_get_all_by_project(context, project_id) def floating_ip_get_by_address(context, address): """Get a floating ip by address or raise if it doesn't exist.""" return IMPL.floating_ip_get_by_address(context, address) def floating_ip_update(context, address, values): """Update a floating ip by address or raise if it doesn't exist.""" return IMPL.floating_ip_update(context, address, values) def floating_ip_set_auto_assigned(context, address): """Set auto_assigned flag to floating ip""" return IMPL.floating_ip_set_auto_assigned(context, address) #################### def migration_update(context, id, values): """Update a migration instance.""" return IMPL.migration_update(context, id, values) def migration_create(context, values): """Create a migration record.""" return IMPL.migration_create(context, values) def migration_get(context, migration_id): """Finds a migration by the id.""" return IMPL.migration_get(context, migration_id) def migration_get_by_instance_and_status(context, instance_uuid, status): """Finds a migration by the instance uuid its migrating.""" return IMPL.migration_get_by_instance_and_status(context, instance_uuid, status) #################### def fixed_ip_associate(context, address, instance_id, network_id=None, reserved=False): """Associate fixed ip to instance. Raises if fixed ip is not available. """ return IMPL.fixed_ip_associate(context, address, instance_id, network_id, reserved) def fixed_ip_associate_pool(context, network_id, instance_id=None, host=None): """Find free ip in network and associate it to instance or host. Raises if one is not available. 
""" return IMPL.fixed_ip_associate_pool(context, network_id, instance_id, host) def fixed_ip_create(context, values): """Create a fixed ip from the values dictionary.""" return IMPL.fixed_ip_create(context, values) def fixed_ip_bulk_create(context, ips): """Create a lot of fixed ips from the values dictionary.""" return IMPL.fixed_ip_bulk_create(context, ips) def fixed_ip_disassociate(context, address): """Disassociate a fixed ip from an instance by address.""" return IMPL.fixed_ip_disassociate(context, address) def fixed_ip_disassociate_all_by_timeout(context, host, time): """Disassociate old fixed ips from host.""" return IMPL.fixed_ip_disassociate_all_by_timeout(context, host, time) def fixed_ip_get_all(context): """Get all defined fixed ips.""" return IMPL.fixed_ip_get_all(context) def fixed_ip_get_all_by_instance_host(context, host): """Get all allocated fixed ips filtered by instance host.""" return IMPL.fixed_ip_get_all_by_instance_host(context, host) def fixed_ip_get_by_address(context, address): """Get a fixed ip by address or raise if it does not exist.""" return IMPL.fixed_ip_get_by_address(context, address) def fixed_ip_get_by_instance(context, instance_id): """Get fixed ips by instance or raise if none exist.""" return IMPL.fixed_ip_get_by_instance(context, instance_id) def fixed_ip_get_by_network_host(context, network_id, host): """Get fixed ip for a host in a network.""" return IMPL.fixed_ip_get_by_network_host(context, network_id, host) def fixed_ip_get_by_virtual_interface(context, vif_id): """Get fixed ips by virtual interface or raise if none exist.""" return IMPL.fixed_ip_get_by_virtual_interface(context, vif_id) def fixed_ip_get_network(context, address): """Get a network for a fixed ip by address.""" return IMPL.fixed_ip_get_network(context, address) def fixed_ip_update(context, address, values): """Create a fixed ip from the values dictionary.""" return IMPL.fixed_ip_update(context, address, values) #################### def virtual_interface_create(context, values): """Create a virtual interface record in the database.""" return IMPL.virtual_interface_create(context, values) def virtual_interface_update(context, vif_id, values): """Update a virtual interface record in the database.""" return IMPL.virtual_interface_update(context, vif_id, values) def virtual_interface_get(context, vif_id): """Gets a virtual interface from the table,""" return IMPL.virtual_interface_get(context, vif_id) def virtual_interface_get_by_address(context, address): """Gets a virtual interface from the table filtering on address.""" return IMPL.virtual_interface_get_by_address(context, address) def virtual_interface_get_by_uuid(context, vif_uuid): """Gets a virtual interface from the table filtering on vif uuid.""" return IMPL.virtual_interface_get_by_uuid(context, vif_uuid) def virtual_interface_get_by_fixed_ip(context, fixed_ip_id): """Gets the virtual interface fixed_ip is associated with.""" return IMPL.virtual_interface_get_by_fixed_ip(context, fixed_ip_id) def virtual_interface_get_by_instance(context, instance_id): """Gets all virtual_interfaces for instance.""" return IMPL.virtual_interface_get_by_instance(context, instance_id) def virtual_interface_get_by_instance_and_network(context, instance_id, network_id): """Gets all virtual interfaces for instance.""" return IMPL.virtual_interface_get_by_instance_and_network(context, instance_id, network_id) def virtual_interface_get_by_network(context, network_id): """Gets all virtual interfaces on network.""" return 
IMPL.virtual_interface_get_by_network(context, network_id) def virtual_interface_delete(context, vif_id): """Delete virtual interface record from the database.""" return IMPL.virtual_interface_delete(context, vif_id) def virtual_interface_delete_by_instance(context, instance_id): """Delete virtual interface records associated with instance.""" return IMPL.virtual_interface_delete_by_instance(context, instance_id) #################### def instance_create(context, values): """Create an instance from the values dictionary.""" return IMPL.instance_create(context, values) def instance_data_get_for_project(context, project_id): """Get (instance_count, total_cores, total_ram) for project.""" return IMPL.instance_data_get_for_project(context, project_id) def instance_destroy(context, instance_id): """Destroy the instance or raise if it does not exist.""" return IMPL.instance_destroy(context, instance_id) def instance_stop(context, instance_id): """Stop the instance or raise if it does not exist.""" return IMPL.instance_stop(context, instance_id) def instance_get_by_uuid(context, uuid): """Get an instance or raise if it does not exist.""" return IMPL.instance_get_by_uuid(context, uuid) def instance_get(context, instance_id): """Get an instance or raise if it does not exist.""" return IMPL.instance_get(context, instance_id) def instance_get_all(context): """Get all instances.""" return IMPL.instance_get_all(context) def instance_get_all_by_filters(context, filters): """Get all instances that match all filters.""" return IMPL.instance_get_all_by_filters(context, filters) def instance_get_active_by_window(context, begin, end=None, project_id=None): """Get instances active during a certain time window. Specifying a project_id will filter for a certain project.""" return IMPL.instance_get_active_by_window(context, begin, end, project_id) def instance_get_active_by_window_joined(context, begin, end=None, project_id=None): """Get instances and joins active during a certain time window. 
Specifying a project_id will filter for a certain project.""" return IMPL.instance_get_active_by_window_joined(context, begin, end, project_id) def instance_get_all_by_user(context, user_id): """Get all instances.""" return IMPL.instance_get_all_by_user(context, user_id) def instance_get_all_by_project(context, project_id): """Get all instance belonging to a project.""" return IMPL.instance_get_all_by_project(context, project_id) def instance_get_all_by_host(context, host): """Get all instance belonging to a host.""" return IMPL.instance_get_all_by_host(context, host) def instance_get_all_by_reservation(context, reservation_id): """Get all instances belonging to a reservation.""" return IMPL.instance_get_all_by_reservation(context, reservation_id) def instance_get_by_fixed_ip(context, address): """Get an instance for a fixed ip by address.""" return IMPL.instance_get_by_fixed_ip(context, address) def instance_get_by_fixed_ipv6(context, address): """Get an instance for a fixed ip by IPv6 address.""" return IMPL.instance_get_by_fixed_ipv6(context, address) def instance_get_fixed_addresses(context, instance_id): """Get the fixed ip address of an instance.""" return IMPL.instance_get_fixed_addresses(context, instance_id) def instance_get_fixed_addresses_v6(context, instance_id): return IMPL.instance_get_fixed_addresses_v6(context, instance_id) def instance_get_floating_address(context, instance_id): """Get the first floating ip address of an instance.""" return IMPL.instance_get_floating_address(context, instance_id) def instance_get_project_vpn(context, project_id): """Get a vpn instance by project or return None.""" return IMPL.instance_get_project_vpn(context, project_id) def instance_set_state(context, instance_id, state, description=None): """Set the state of an instance.""" return IMPL.instance_set_state(context, instance_id, state, description) def instance_update(context, instance_id, values): """Set the given properties on an instance and update it. Raises NotFound if instance does not exist. 
""" return IMPL.instance_update(context, instance_id, values) def instance_add_security_group(context, instance_id, security_group_id): """Associate the given security group with the given instance.""" return IMPL.instance_add_security_group(context, instance_id, security_group_id) def instance_remove_security_group(context, instance_id, security_group_id): """Disassociate the given security group from the given instance.""" return IMPL.instance_remove_security_group(context, instance_id, security_group_id) def instance_action_create(context, values): """Create an instance action from the values dictionary.""" return IMPL.instance_action_create(context, values) def instance_get_actions(context, instance_id): """Get instance actions by instance id.""" return IMPL.instance_get_actions(context, instance_id) ################### def key_pair_create(context, values): """Create a key_pair from the values dictionary.""" return IMPL.key_pair_create(context, values) def key_pair_destroy(context, user_id, name): """Destroy the key_pair or raise if it does not exist.""" return IMPL.key_pair_destroy(context, user_id, name) def key_pair_destroy_all_by_user(context, user_id): """Destroy all key_pairs by user.""" return IMPL.key_pair_destroy_all_by_user(context, user_id) def key_pair_get(context, user_id, name): """Get a key_pair or raise if it does not exist.""" return IMPL.key_pair_get(context, user_id, name) def key_pair_get_all_by_user(context, user_id): """Get all key_pairs by user.""" return IMPL.key_pair_get_all_by_user(context, user_id) #################### def network_associate(context, project_id, force=False): """Associate a free network to a project.""" return IMPL.network_associate(context, project_id, force) def network_count(context): """Return the number of networks.""" return IMPL.network_count(context) def network_count_allocated_ips(context, network_id): """Return the number of allocated non-reserved ips in the network.""" return IMPL.network_count_allocated_ips(context, network_id) def network_count_available_ips(context, network_id): """Return the number of available ips in the network.""" return IMPL.network_count_available_ips(context, network_id) def network_count_reserved_ips(context, network_id): """Return the number of reserved ips in the network.""" return IMPL.network_count_reserved_ips(context, network_id) def network_create_safe(context, values): """Create a network from the values dict. The network is only returned if the create succeeds. If the create violates constraints because the network already exists, no exception is raised. """ return IMPL.network_create_safe(context, values) def network_delete_safe(context, network_id): """Delete network with key network_id. 
    This method assumes that the network is not associated with any project

    """
    return IMPL.network_delete_safe(context, network_id)


def network_create_fixed_ips(context, network_id, num_vpn_clients):
    """Create the ips for the network, reserving specified ips."""
    return IMPL.network_create_fixed_ips(context, network_id, num_vpn_clients)


def network_disassociate(context, network_id):
    """Disassociate the network from project or raise if it does not exist."""
    return IMPL.network_disassociate(context, network_id)


def network_disassociate_all(context):
    """Disassociate all networks from projects."""
    return IMPL.network_disassociate_all(context)


def network_get(context, network_id):
    """Get a network or raise if it does not exist."""
    return IMPL.network_get(context, network_id)


def network_get_all(context):
    """Return all defined networks."""
    return IMPL.network_get_all(context)


def network_get_all_by_uuids(context, network_uuids, project_id=None):
    """Return networks by ids."""
    return IMPL.network_get_all_by_uuids(context, network_uuids, project_id)


# pylint: disable=C0103
def network_get_associated_fixed_ips(context, network_id):
    """Get all network's ips that have been associated."""
    return IMPL.network_get_associated_fixed_ips(context, network_id)


def network_get_by_bridge(context, bridge):
    """Get a network by bridge or raise if it does not exist."""
    return IMPL.network_get_by_bridge(context, bridge)


def network_get_by_uuid(context, uuid):
    """Get a network by uuid or raise if it does not exist."""
    return IMPL.network_get_by_uuid(context, uuid)


def network_get_by_cidr(context, cidr):
    """Get a network by cidr or raise if it does not exist."""
    return IMPL.network_get_by_cidr(context, cidr)


def network_get_by_instance(context, instance_id):
    """Get a network by instance id or raise if it does not exist."""
    return IMPL.network_get_by_instance(context, instance_id)


def network_get_all_by_instance(context, instance_id):
    """Get all networks by instance id or raise if none exist."""
    return IMPL.network_get_all_by_instance(context, instance_id)


def network_get_all_by_host(context, host):
    """All networks for which the given host is the network host."""
    return IMPL.network_get_all_by_host(context, host)


def network_get_index(context, network_id):
    """Get non-conflicting index for network."""
    return IMPL.network_get_index(context, network_id)


def network_get_vpn_ip(context, network_id):
    """Get the vpn ip for the network."""
    return IMPL.network_get_vpn_ip(context, network_id)


def network_set_cidr(context, network_id, cidr):
    """Set the Classless Inter-Domain Routing block for the network."""
    return IMPL.network_set_cidr(context, network_id, cidr)


def network_set_host(context, network_id, host_id):
    """Safely set the host for network."""
    return IMPL.network_set_host(context, network_id, host_id)


def network_update(context, network_id, values):
    """Set the given properties on a network and update it.

    Raises NotFound if network does not exist.

    """
    return IMPL.network_update(context, network_id, values)


###################


def queue_get_for(context, topic, physical_node_id):
    """Return a channel to send a message to a node with a topic."""
    return IMPL.queue_get_for(context, topic, physical_node_id)


###################


def export_device_count(context):
    """Return count of export devices."""
    return IMPL.export_device_count(context)


def export_device_create_safe(context, values):
    """Create an export_device from the values dictionary.

    The device is not returned.
    If the create violates the unique constraints because the shelf_id and
    blade_id already exist, no exception is raised.

    """
    return IMPL.export_device_create_safe(context, values)


###################


def iscsi_target_count_by_host(context, host):
    """Return count of iscsi targets for the given host."""
    return IMPL.iscsi_target_count_by_host(context, host)


def iscsi_target_create_safe(context, values):
    """Create an iscsi_target from the values dictionary.

    The device is not returned. If the create violates the unique
    constraints because the iscsi_target and host already exist,
    no exception is raised.

    """
    return IMPL.iscsi_target_create_safe(context, values)


###############


def auth_token_destroy(context, token_id):
    """Destroy an auth token."""
    return IMPL.auth_token_destroy(context, token_id)


def auth_token_get(context, token_hash):
    """Retrieves a token given the hash representing it."""
    return IMPL.auth_token_get(context, token_hash)


def auth_token_update(context, token_hash, values):
    """Updates a token given the hash representing it."""
    return IMPL.auth_token_update(context, token_hash, values)


def auth_token_create(context, token):
    """Creates a new token."""
    return IMPL.auth_token_create(context, token)


###################


def quota_create(context, project_id, resource, limit):
    """Create a quota for the given project and resource."""
    return IMPL.quota_create(context, project_id, resource, limit)


def quota_get(context, project_id, resource):
    """Retrieve a quota or raise if it does not exist."""
    return IMPL.quota_get(context, project_id, resource)


def quota_get_all_by_project(context, project_id):
    """Retrieve all quotas associated with a given project."""
    return IMPL.quota_get_all_by_project(context, project_id)


def quota_update(context, project_id, resource, limit):
    """Update a quota or raise if it does not exist."""
    return IMPL.quota_update(context, project_id, resource, limit)


def quota_destroy(context, project_id, resource):
    """Destroy the quota or raise if it does not exist."""
    return IMPL.quota_destroy(context, project_id, resource)


def quota_destroy_all_by_project(context, project_id):
    """Destroy all quotas associated with a given project."""
    return IMPL.quota_destroy_all_by_project(context, project_id)


###################


def volume_allocate_shelf_and_blade(context, volume_id):
    """Atomically allocate a free shelf and blade from the pool."""
    return IMPL.volume_allocate_shelf_and_blade(context, volume_id)


def volume_allocate_iscsi_target(context, volume_id, host):
    """Atomically allocate a free iscsi_target from the pool."""
    return IMPL.volume_allocate_iscsi_target(context, volume_id, host)


def volume_attached(context, volume_id, instance_id, mountpoint):
    """Ensure that a volume is set as attached."""
    return IMPL.volume_attached(context, volume_id, instance_id, mountpoint)


def volume_create(context, values):
    """Create a volume from the values dictionary."""
    return IMPL.volume_create(context, values)


def volume_data_get_for_project(context, project_id):
    """Get (volume_count, gigabytes) for project."""
    return IMPL.volume_data_get_for_project(context, project_id)


def volume_destroy(context, volume_id):
    """Destroy the volume or raise if it does not exist."""
    return IMPL.volume_destroy(context, volume_id)


def volume_detached(context, volume_id):
    """Ensure that a volume is set as detached."""
    return IMPL.volume_detached(context, volume_id)


def volume_get(context, volume_id):
    """Get a volume or raise if it does not exist."""
    return IMPL.volume_get(context, volume_id)


def volume_get_all(context):
    """Get all
volumes.""" return IMPL.volume_get_all(context) def volume_get_all_by_host(context, host): """Get all volumes belonging to a host.""" return IMPL.volume_get_all_by_host(context, host) def volume_get_all_by_instance(context, instance_id): """Get all volumes belonging to a instance.""" return IMPL.volume_get_all_by_instance(context, instance_id) def volume_get_all_by_project(context, project_id): """Get all volumes belonging to a project.""" return IMPL.volume_get_all_by_project(context, project_id) def volume_get_by_ec2_id(context, ec2_id): """Get a volume by ec2 id.""" return IMPL.volume_get_by_ec2_id(context, ec2_id) def volume_get_instance(context, volume_id): """Get the instance that a volume is attached to.""" return IMPL.volume_get_instance(context, volume_id) def volume_get_shelf_and_blade(context, volume_id): """Get the shelf and blade allocated to the volume.""" return IMPL.volume_get_shelf_and_blade(context, volume_id) def volume_get_iscsi_target_num(context, volume_id): """Get the target num (tid) allocated to the volume.""" return IMPL.volume_get_iscsi_target_num(context, volume_id) def volume_update(context, volume_id, values): """Set the given properties on an volume and update it. Raises NotFound if volume does not exist. """ return IMPL.volume_update(context, volume_id, values) #################### def snapshot_create(context, values): """Create a snapshot from the values dictionary.""" return IMPL.snapshot_create(context, values) def snapshot_destroy(context, snapshot_id): """Destroy the snapshot or raise if it does not exist.""" return IMPL.snapshot_destroy(context, snapshot_id) def snapshot_get(context, snapshot_id): """Get a snapshot or raise if it does not exist.""" return IMPL.snapshot_get(context, snapshot_id) def snapshot_get_all(context): """Get all snapshots.""" return IMPL.snapshot_get_all(context) def snapshot_get_all_by_project(context, project_id): """Get all snapshots belonging to a project.""" return IMPL.snapshot_get_all_by_project(context, project_id) def snapshot_update(context, snapshot_id, values): """Set the given properties on an snapshot and update it. Raises NotFound if snapshot does not exist. """ return IMPL.snapshot_update(context, snapshot_id, values) #################### def block_device_mapping_create(context, values): """Create an entry of block device mapping""" return IMPL.block_device_mapping_create(context, values) def block_device_mapping_update(context, bdm_id, values): """Update an entry of block device mapping""" return IMPL.block_device_mapping_update(context, bdm_id, values) def block_device_mapping_update_or_create(context, values): """Update an entry of block device mapping. 
If not existed, create a new entry""" return IMPL.block_device_mapping_update_or_create(context, values) def block_device_mapping_get_all_by_instance(context, instance_id): """Get all block device mapping belonging to a instance""" return IMPL.block_device_mapping_get_all_by_instance(context, instance_id) def block_device_mapping_destroy(context, bdm_id): """Destroy the block device mapping.""" return IMPL.block_device_mapping_destroy(context, bdm_id) def block_device_mapping_destroy_by_instance_and_volume(context, instance_id, volume_id): """Destroy the block device mapping or raise if it does not exist.""" return IMPL.block_device_mapping_destroy_by_instance_and_volume( context, instance_id, volume_id) #################### def security_group_get_all(context): """Get all security groups.""" return IMPL.security_group_get_all(context) def security_group_get(context, security_group_id): """Get security group by its id.""" return IMPL.security_group_get(context, security_group_id) def security_group_get_by_name(context, project_id, group_name): """Returns a security group with the specified name from a project.""" return IMPL.security_group_get_by_name(context, project_id, group_name) def security_group_get_by_project(context, project_id): """Get all security groups belonging to a project.""" return IMPL.security_group_get_by_project(context, project_id) def security_group_get_by_instance(context, instance_id): """Get security groups to which the instance is assigned.""" return IMPL.security_group_get_by_instance(context, instance_id) def security_group_exists(context, project_id, group_name): """Indicates if a group name exists in a project.""" return IMPL.security_group_exists(context, project_id, group_name) def security_group_create(context, values): """Create a new security group.""" return IMPL.security_group_create(context, values) def security_group_destroy(context, security_group_id): """Deletes a security group.""" return IMPL.security_group_destroy(context, security_group_id) def security_group_destroy_all(context): """Deletes a security group.""" return IMPL.security_group_destroy_all(context) def security_group_count_by_project(context, project_id): """Count number of security groups in a project.""" return IMPL.security_group_count_by_project(context, project_id) #################### def security_group_rule_create(context, values): """Create a new security group.""" return IMPL.security_group_rule_create(context, values) def security_group_rule_get_by_security_group(context, security_group_id): """Get all rules for a a given security group.""" return IMPL.security_group_rule_get_by_security_group(context, security_group_id) def security_group_rule_get_by_security_group_grantee(context, security_group_id): """Get all rules that grant access to the given security group.""" return IMPL.security_group_rule_get_by_security_group_grantee(context, security_group_id) def security_group_rule_destroy(context, security_group_rule_id): """Deletes a security group rule.""" return IMPL.security_group_rule_destroy(context, security_group_rule_id) def security_group_rule_get(context, security_group_rule_id): """Gets a security group rule.""" return IMPL.security_group_rule_get(context, security_group_rule_id) def security_group_rule_count_by_group(context, security_group_id): """Count rules in a given security group.""" return IMPL.security_group_rule_count_by_group(context, security_group_id) ################### def provider_fw_rule_create(context, rule): """Add a firewall rule at the 
    provider level (all hosts &amp; instances)."""
    return IMPL.provider_fw_rule_create(context, rule)


def provider_fw_rule_get_all(context):
    """Get all provider-level firewall rules."""
    return IMPL.provider_fw_rule_get_all(context)


def provider_fw_rule_get_all_by_cidr(context, cidr):
    """Get all provider-level firewall rules matching the given cidr."""
    return IMPL.provider_fw_rule_get_all_by_cidr(context, cidr)


def provider_fw_rule_destroy(context, rule_id):
    """Delete a provider firewall rule from the database."""
    return IMPL.provider_fw_rule_destroy(context, rule_id)


###################


def user_get(context, id):
    """Get user by id."""
    return IMPL.user_get(context, id)


def user_get_by_uid(context, uid):
    """Get user by uid."""
    return IMPL.user_get_by_uid(context, uid)


def user_get_by_access_key(context, access_key):
    """Get user by access key."""
    return IMPL.user_get_by_access_key(context, access_key)


def user_create(context, values):
    """Create a new user."""
    return IMPL.user_create(context, values)


def user_delete(context, id):
    """Delete a user."""
    return IMPL.user_delete(context, id)


def user_get_all(context):
    """Get all users."""
    return IMPL.user_get_all(context)


def user_add_role(context, user_id, role):
    """Add another global role for user."""
    return IMPL.user_add_role(context, user_id, role)


def user_remove_role(context, user_id, role):
    """Remove global role from user."""
    return IMPL.user_remove_role(context, user_id, role)


def user_get_roles(context, user_id):
    """Get global roles for user."""
    return IMPL.user_get_roles(context, user_id)


def user_add_project_role(context, user_id, project_id, role):
    """Add project role for user."""
    return IMPL.user_add_project_role(context, user_id, project_id, role)


def user_remove_project_role(context, user_id, project_id, role):
    """Remove project role from user."""
    return IMPL.user_remove_project_role(context, user_id, project_id, role)


def user_get_roles_for_project(context, user_id, project_id):
    """Return list of roles a user holds on project."""
    return IMPL.user_get_roles_for_project(context, user_id, project_id)


def user_update(context, user_id, values):
    """Update user."""
    return IMPL.user_update(context, user_id, values)


###################


def project_get(context, id):
    """Get project by id."""
    return IMPL.project_get(context, id)


def project_create(context, values):
    """Create a new project."""
    return IMPL.project_create(context, values)


def project_add_member(context, project_id, user_id):
    """Add user to project."""
    return IMPL.project_add_member(context, project_id, user_id)


def project_get_all(context):
    """Get all projects."""
    return IMPL.project_get_all(context)


def project_get_by_user(context, user_id):
    """Get all projects of which the given user is a member."""
    return IMPL.project_get_by_user(context, user_id)


def project_remove_member(context, project_id, user_id):
    """Remove the given user from the given project."""
    return IMPL.project_remove_member(context, project_id, user_id)


def project_update(context, project_id, values):
    """Update the given project."""
    return IMPL.project_update(context, project_id, values)


def project_delete(context, project_id):
    """Delete project."""
    return IMPL.project_delete(context, project_id)


def project_get_networks(context, project_id, associate=True):
    """Return the network associated with the project.

    If associate is true, it will attempt to associate a new
    network if one is not found, otherwise it returns None.
""" return IMPL.project_get_networks(context, project_id, associate) def project_get_networks_v6(context, project_id): return IMPL.project_get_networks_v6(context, project_id) ################### def console_pool_create(context, values): """Create console pool.""" return IMPL.console_pool_create(context, values) def console_pool_get(context, pool_id): """Get a console pool.""" return IMPL.console_pool_get(context, pool_id) def console_pool_get_by_host_type(context, compute_host, proxy_host, console_type): """Fetch a console pool for a given proxy host, compute host, and type.""" return IMPL.console_pool_get_by_host_type(context, compute_host, proxy_host, console_type) def console_pool_get_all_by_host_type(context, host, console_type): """Fetch all pools for given proxy host and type.""" return IMPL.console_pool_get_all_by_host_type(context, host, console_type) def console_create(context, values): """Create a console.""" return IMPL.console_create(context, values) def console_delete(context, console_id): """Delete a console.""" return IMPL.console_delete(context, console_id) def console_get_by_pool_instance(context, pool_id, instance_id): """Get console entry for a given instance and pool.""" return IMPL.console_get_by_pool_instance(context, pool_id, instance_id) def console_get_all_by_instance(context, instance_id): """Get consoles for a given instance.""" return IMPL.console_get_all_by_instance(context, instance_id) def console_get(context, console_id, instance_id=None): """Get a specific console (possibly on a given instance).""" return IMPL.console_get(context, console_id, instance_id) ################## def instance_type_create(context, values): """Create a new instance type.""" return IMPL.instance_type_create(context, values) def instance_type_get_all(context, inactive=False): """Get all instance types.""" return IMPL.instance_type_get_all(context, inactive) def instance_type_get(context, id): """Get instance type by id.""" return IMPL.instance_type_get(context, id) def instance_type_get_by_name(context, name): """Get instance type by name.""" return IMPL.instance_type_get_by_name(context, name) def instance_type_get_by_flavor_id(context, id): """Get instance type by name.""" return IMPL.instance_type_get_by_flavor_id(context, id) def instance_type_destroy(context, name): """Delete a instance type.""" return IMPL.instance_type_destroy(context, name) def instance_type_purge(context, name): """Purges (removes) an instance type from DB. 
Use instance_type_destroy for most cases """ return IMPL.instance_type_purge(context, name) #################### def zone_create(context, values): """Create a new child Zone entry.""" return IMPL.zone_create(context, values) def zone_update(context, zone_id, values): """Update a child Zone entry.""" return IMPL.zone_update(context, zone_id, values) def zone_delete(context, zone_id): """Delete a child Zone.""" return IMPL.zone_delete(context, zone_id) def zone_get(context, zone_id): """Get a specific child Zone.""" return IMPL.zone_get(context, zone_id) def zone_get_all(context): """Get all child Zones.""" return IMPL.zone_get_all(context) #################### def instance_metadata_get(context, instance_id): """Get all metadata for an instance.""" return IMPL.instance_metadata_get(context, instance_id) def instance_metadata_delete(context, instance_id, key): """Delete the given metadata item.""" IMPL.instance_metadata_delete(context, instance_id, key) def instance_metadata_update(context, instance_id, metadata, delete): """Update metadata if it exists, otherwise create it.""" IMPL.instance_metadata_update(context, instance_id, metadata, delete) #################### def agent_build_create(context, values): """Create a new agent build entry.""" return IMPL.agent_build_create(context, values) def agent_build_get_by_triple(context, hypervisor, os, architecture): """Get agent build by hypervisor/OS/architecture triple.""" return IMPL.agent_build_get_by_triple(context, hypervisor, os, architecture) def agent_build_get_all(context): """Get all agent builds.""" return IMPL.agent_build_get_all(context) def agent_build_destroy(context, agent_update_id): """Destroy agent build entry.""" IMPL.agent_build_destroy(context, agent_update_id) def agent_build_update(context, agent_build_id, values): """Update agent build entry.""" IMPL.agent_build_update(context, agent_build_id, values) #################### def instance_type_extra_specs_get(context, instance_type_id): """Get all extra specs for an instance type.""" return IMPL.instance_type_extra_specs_get(context, instance_type_id) def instance_type_extra_specs_delete(context, instance_type_id, key): """Delete the given extra specs item.""" IMPL.instance_type_extra_specs_delete(context, instance_type_id, key) def instance_type_extra_specs_update_or_create(context, instance_type_id, extra_specs): """Create or update instance type extra specs. 
This adds or modifies the key/value pairs specified in the extra specs dict argument""" IMPL.instance_type_extra_specs_update_or_create(context, instance_type_id, extra_specs) ################## def volume_metadata_get(context, volume_id): """Get all metadata for a volume.""" return IMPL.volume_metadata_get(context, volume_id) def volume_metadata_delete(context, volume_id, key): """Delete the given metadata item.""" IMPL.volume_metadata_delete(context, volume_id, key) def volume_metadata_update(context, volume_id, metadata, delete): """Update metadata if it exists, otherwise create it.""" IMPL.volume_metadata_update(context, volume_id, metadata, delete) ################## def volume_type_create(context, values): """Create a new volume type.""" return IMPL.volume_type_create(context, values) def volume_type_get_all(context, inactive=False): """Get all volume types.""" return IMPL.volume_type_get_all(context, inactive) def volume_type_get(context, id): """Get volume type by id.""" return IMPL.volume_type_get(context, id) def volume_type_get_by_name(context, name): """Get volume type by name.""" return IMPL.volume_type_get_by_name(context, name) def volume_type_destroy(context, name): """Delete a volume type.""" return IMPL.volume_type_destroy(context, name) def volume_type_purge(context, name): """Purges (removes) a volume type from DB. Use volume_type_destroy for most cases """ return IMPL.volume_type_purge(context, name) #################### def volume_type_extra_specs_get(context, volume_type_id): """Get all extra specs for a volume type.""" return IMPL.volume_type_extra_specs_get(context, volume_type_id) def volume_type_extra_specs_delete(context, volume_type_id, key): """Delete the given extra specs item.""" IMPL.volume_type_extra_specs_delete(context, volume_type_id, key) def volume_type_extra_specs_update_or_create(context, volume_type_id, extra_specs): """Create or update volume type extra specs. This adds or modifies the key/value pairs specified in the extra specs dict argument""" IMPL.volume_type_extra_specs_update_or_create(context, volume_type_id, extra_specs) #################### def vsa_create(context, values): """Creates Virtual Storage Array record.""" return IMPL.vsa_create(context, values) def vsa_update(context, vsa_id, values): """Updates Virtual Storage Array record.""" return IMPL.vsa_update(context, vsa_id, values) def vsa_destroy(context, vsa_id): """Deletes Virtual Storage Array record.""" return IMPL.vsa_destroy(context, vsa_id) def vsa_get(context, vsa_id): """Get Virtual Storage Array record by ID.""" return IMPL.vsa_get(context, vsa_id) def vsa_get_all(context): """Get all Virtual Storage Array records.""" return IMPL.vsa_get_all(context) def vsa_get_all_by_project(context, project_id): """Get all Virtual Storage Array records by project ID.""" return IMPL.vsa_get_all_by_project(context, project_id)
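

# ---------------------------------------------------------------------------
# A minimal, self-contained sketch of the dispatch pattern used throughout
# this module: each public function forwards to IMPL, which LazyPluggable
# resolves to a backend module (here nova.db.sqlalchemy.api) on first use.
# This illustrates the idea only; it is not nova's actual LazyPluggable code,
# and the class name below is hypothetical.

import importlib


class _LazyBackendSketch(object):
    """Import a backend module lazily and proxy attribute access to it."""

    def __init__(self, module_name):
        self._module_name = module_name
        self._module = None

    def __getattr__(self, name):
        # The backend is imported only when the first DB call is made.
        if self._module is None:
            self._module = importlib.import_module(self._module_name)
        return getattr(self._module, name)


# Usage mirrors the facade functions above, e.g.:
#     IMPL_SKETCH = _LazyBackendSketch('nova.db.sqlalchemy.api')
#     IMPL_SKETCH.service_get(context, service_id)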
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2011 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from nova.rootwrap import filters

filterlist = [
    # nova/virt/disk/mount.py: 'kpartx', '-a', device
    # nova/virt/disk/mount.py: 'kpartx', '-d', device
    filters.CommandFilter("/sbin/kpartx", "root"),

    # nova/virt/disk/mount.py: 'tune2fs', '-c', 0, '-i', 0, mapped_device
    # nova/virt/xenapi/vm_utils.py: "tune2fs", "-O ^has_journal", part_path
    # nova/virt/xenapi/vm_utils.py: "tune2fs", "-j", partition_path
    filters.CommandFilter("/sbin/tune2fs", "root"),

    # nova/virt/disk/mount.py: 'mount', mapped_device, mount_dir
    # nova/virt/xenapi/vm_utils.py: 'mount', '-t', 'ext2,ext3,ext4,reiserfs'..
    filters.CommandFilter("/bin/mount", "root"),

    # nova/virt/disk/mount.py: 'umount', mapped_device
    # nova/virt/xenapi/vm_utils.py: 'umount', dev_path
    filters.CommandFilter("/bin/umount", "root"),

    # nova/virt/disk/nbd.py: 'qemu-nbd', '-c', device, image
    # nova/virt/disk/nbd.py: 'qemu-nbd', '-d', device
    filters.CommandFilter("/usr/bin/qemu-nbd", "root"),

    # nova/virt/disk/loop.py: 'losetup', '--find', '--show', image
    # nova/virt/disk/loop.py: 'losetup', '--detach', device
    filters.CommandFilter("/sbin/losetup", "root"),

    # nova/virt/disk/guestfs.py: 'guestmount', '--rw', '-a', image, '-i'
    # nova/virt/disk/guestfs.py: 'guestmount', '--rw', '-a', image, '-m' dev
    filters.CommandFilter("/usr/bin/guestmount", "root"),

    # nova/virt/disk/guestfs.py: 'fusermount', 'u', mount_dir
    filters.CommandFilter("/bin/fusermount", "root"),
    filters.CommandFilter("/usr/bin/fusermount", "root"),

    # nova/virt/disk/api.py: 'tee', metadata_path
    # nova/virt/disk/api.py: 'tee', '-a', keyfile
    # nova/virt/disk/api.py: 'tee', netfile
    filters.CommandFilter("/usr/bin/tee", "root"),

    # nova/virt/disk/api.py: 'mkdir', '-p', sshdir
    # nova/virt/disk/api.py: 'mkdir', '-p', netdir
    filters.CommandFilter("/bin/mkdir", "root"),

    # nova/virt/disk/api.py: 'chown', 'root', sshdir
    # nova/virt/disk/api.py: 'chown', 'root:root', netdir
    # nova/virt/libvirt/connection.py: 'chown', os.getuid(), console_log
    # nova/virt/libvirt/connection.py: 'chown', os.getuid(), console_log
    # nova/virt/libvirt/connection.py: 'chown', 'root', basepath('disk')
    # nova/utils.py: 'chown', owner_uid, path
    filters.CommandFilter("/bin/chown", "root"),

    # nova/virt/disk/api.py: 'chmod', '700', sshdir
    # nova/virt/disk/api.py: 'chmod', 755, netdir
    filters.CommandFilter("/bin/chmod", "root"),

    # nova/virt/disk/api.py: 'cp', os.path.join(fs...
    filters.CommandFilter("/bin/cp", "root"),

    # nova/virt/libvirt/vif.py: 'ip', 'tuntap', 'add', dev, 'mode', 'tap'
    # nova/virt/libvirt/vif.py: 'ip', 'link', 'set', dev, 'up'
    # nova/virt/libvirt/vif.py: 'ip', 'link', 'delete', dev
    # nova/network/linux_net.py: 'ip', 'addr', 'add', str(floating_ip)+'/32'i..
    # nova/network/linux_net.py: 'ip', 'addr', 'del', str(floating_ip)+'/32'..
    # nova/network/linux_net.py: 'ip', 'addr', 'add', '169.254.169.254/32',..
    # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', dev, 'scope',..
    # nova/network/linux_net.py: 'ip', 'addr', 'del/add', ip_params, dev)
    # nova/network/linux_net.py: 'ip', 'addr', 'del', params, fields[-1]
    # nova/network/linux_net.py: 'ip', 'addr', 'add', params, bridge
    # nova/network/linux_net.py: 'ip', '-f', 'inet6', 'addr', 'change', ..
    # nova/network/linux_net.py: 'ip', 'link', 'set', 'dev', dev, 'promisc',..
    # nova/network/linux_net.py: 'ip', 'link', 'add', 'link', bridge_if ...
    # nova/network/linux_net.py: 'ip', 'link', 'set', interface, "address",..
    # nova/network/linux_net.py: 'ip', 'link', 'set', interface, 'up'
    # nova/network/linux_net.py: 'ip', 'link', 'set', bridge, 'up'
    # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', interface, ..
    # nova/network/linux_net.py: 'ip', 'link', 'set', dev, "address", ..
    # nova/network/linux_net.py: 'ip', 'link', 'set', dev, 'up'
    filters.CommandFilter("/sbin/ip", "root"),

    # nova/virt/libvirt/vif.py: 'tunctl', '-b', '-t', dev
    # nova/network/linux_net.py: 'tunctl', '-b', '-t', dev
    filters.CommandFilter("/usr/sbin/tunctl", "root"),
    filters.CommandFilter("/bin/tunctl", "root"),

    # nova/virt/libvirt/vif.py: 'ovs-vsctl', ...
    # nova/virt/libvirt/vif.py: 'ovs-vsctl', 'del-port', ...
    # nova/network/linux_net.py: 'ovs-vsctl', ....
    filters.CommandFilter("/usr/bin/ovs-vsctl", "root"),

    # nova/network/linux_net.py: 'ovs-ofctl', ....
    filters.CommandFilter("/usr/bin/ovs-ofctl", "root"),

    # nova/virt/libvirt/connection.py: 'dd', "if=%s" % virsh_output, ...
    filters.CommandFilter("/bin/dd", "root"),

    # nova/virt/xenapi/volume_utils.py: 'iscsiadm', '-m', ...
    filters.CommandFilter("/sbin/iscsiadm", "root"),

    # nova/virt/xenapi/vm_utils.py: "parted", "--script", ...
    # nova/virt/xenapi/vm_utils.py: 'parted', '--script', dev_path, ...
    filters.CommandFilter("/sbin/parted", "root"),
    filters.CommandFilter("/usr/sbin/parted", "root"),

    # nova/virt/xenapi/vm_utils.py: fdisk %(dev_path)s
    filters.CommandFilter("/sbin/fdisk", "root"),

    # nova/virt/xenapi/vm_utils.py: "e2fsck", "-f", "-p", partition_path
    filters.CommandFilter("/sbin/e2fsck", "root"),

    # nova/virt/xenapi/vm_utils.py: "resize2fs", partition_path
    filters.CommandFilter("/sbin/resize2fs", "root"),

    # nova/network/linux_net.py: 'ip[6]tables-save' % (cmd,), '-t', ...
    filters.CommandFilter("/sbin/iptables-save", "root"),
    filters.CommandFilter("/usr/sbin/iptables-save", "root"),
    filters.CommandFilter("/sbin/ip6tables-save", "root"),
    filters.CommandFilter("/usr/sbin/ip6tables-save", "root"),

    # nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,)
    filters.CommandFilter("/sbin/iptables-restore", "root"),
    filters.CommandFilter("/usr/sbin/iptables-restore", "root"),
    filters.CommandFilter("/sbin/ip6tables-restore", "root"),
    filters.CommandFilter("/usr/sbin/ip6tables-restore", "root"),

    # nova/network/linux_net.py: 'arping', '-U', floating_ip, '-A', '-I', ...
    # nova/network/linux_net.py: 'arping', '-U', network_ref['dhcp_server'],..
    filters.CommandFilter("/usr/bin/arping", "root"),
    filters.CommandFilter("/sbin/arping", "root"),

    # nova/network/linux_net.py: 'route', '-n'
    # nova/network/linux_net.py: 'route', 'del', 'default', 'gw'
    # nova/network/linux_net.py: 'route', 'add', 'default', 'gw'
    # nova/network/linux_net.py: 'route', '-n'
    # nova/network/linux_net.py: 'route', 'del', 'default', 'gw', old_gw, ..
    # nova/network/linux_net.py: 'route', 'add', 'default', 'gw', old_gateway
    filters.CommandFilter("/sbin/route", "root"),

    # nova/network/linux_net.py: 'dhcp_release', dev, address, mac_address
    filters.CommandFilter("/usr/bin/dhcp_release", "root"),

    # nova/network/linux_net.py: 'kill', '-9', pid
    # nova/network/linux_net.py: 'kill', '-HUP', pid
    filters.KillFilter("/bin/kill", "root",
                       ['-9', '-HUP'],
                       ['/usr/sbin/dnsmasq']),

    # nova/network/linux_net.py: 'kill', pid
    filters.KillFilter("/bin/kill", "root", [''], ['/usr/sbin/radvd']),

    # nova/network/linux_net.py: dnsmasq call
    filters.DnsmasqFilter("/usr/sbin/dnsmasq", "root"),

    # nova/network/linux_net.py: 'radvd', '-C', '%s' % _ra_file(dev, 'conf'),..
    filters.CommandFilter("/usr/sbin/radvd", "root"),

    # nova/network/linux_net.py: 'brctl', 'addbr', bridge
    # nova/network/linux_net.py: 'brctl', 'setfd', bridge, 0
    # nova/network/linux_net.py: 'brctl', 'stp', bridge, 'off'
    # nova/network/linux_net.py: 'brctl', 'addif', bridge, interface
    filters.CommandFilter("/sbin/brctl", "root"),
    filters.CommandFilter("/usr/sbin/brctl", "root"),

    # nova/virt/libvirt/utils.py: 'mkswap'
    # nova/virt/xenapi/vm_utils.py: 'mkswap'
    filters.CommandFilter("/sbin/mkswap", "root"),

    # nova/virt/xenapi/vm_utils.py: 'mkfs'
    filters.CommandFilter("/sbin/mkfs", "root"),

    # nova/virt/libvirt/utils.py: 'qemu-img'
    filters.CommandFilter("/usr/bin/qemu-img", "root"),

    # nova/virt/disk/api.py: 'readlink', '-e'
    filters.CommandFilter("/usr/bin/readlink", "root"),
    filters.CommandFilter("/bin/readlink", "root"),

    # nova/virt/disk/api.py: 'touch', target
    filters.CommandFilter("/usr/bin/touch", "root"),

    # nova/virt/libvirt/connection.py:
    filters.ReadFileFilter("/etc/iscsi/initiatorname.iscsi"),
]
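
# Editor's illustrative sketch (not part of the original filter file, and not
# nova's actual rootwrap matching internals): roughly how a filter list like
# the one above gates a command before it is escalated to root. The demo class
# and helper below are assumptions for demonstration only.
if __name__ == "__main__":
    import os.path

    class DemoCommandFilter(object):
        """Toy stand-in for filters.CommandFilter: match on executable name."""
        def __init__(self, exec_path, run_as):
            self.exec_path = exec_path
            self.run_as = run_as

        def match(self, userargs):
            if not userargs:
                return False
            return os.path.basename(self.exec_path) == \
                os.path.basename(userargs[0])

    demo_filters = [DemoCommandFilter("/sbin/ip", "root")]

    def allowed(userargs):
        # A command is only run if some filter in the list matches it.
        return any(f.match(userargs) for f in demo_filters)

    assert allowed(["ip", "link", "show"])      # executable is whitelisted
    assert not allowed(["rm", "-rf", "/tmp"])   # anything else is rejected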
./CrossVul/dataset_final_sorted/CWE-264/py/good_3698_0
crossvul-python_data_bad_1622_3
import os
from io import StringIO
import stat
import subprocess
import sys
import shutil
import tempfile
import time
import unittest
from hashlib import sha256

from attic import xattr
from attic.archive import Archive, ChunkBuffer
from attic.archiver import Archiver
from attic.crypto import bytes_to_long, num_aes_blocks
from attic.helpers import Manifest
from attic.remote import RemoteRepository, PathNotAllowed
from attic.repository import Repository
from attic.testsuite import AtticTestCase
from attic.testsuite.mock import patch

try:
    import llfuse
    has_llfuse = True
except ImportError:
    has_llfuse = False

has_lchflags = hasattr(os, 'lchflags')

src_dir = os.path.join(os.getcwd(), os.path.dirname(__file__), '..')


class changedir:
    def __init__(self, dir):
        self.dir = dir

    def __enter__(self):
        self.old = os.getcwd()
        os.chdir(self.dir)

    def __exit__(self, *args, **kw):
        os.chdir(self.old)


class ArchiverTestCaseBase(AtticTestCase):

    prefix = ''

    def setUp(self):
        os.environ['ATTIC_CHECK_I_KNOW_WHAT_I_AM_DOING'] = '1'
        self.archiver = Archiver()
        self.tmpdir = tempfile.mkdtemp()
        self.repository_path = os.path.join(self.tmpdir, 'repository')
        self.repository_location = self.prefix + self.repository_path
        self.input_path = os.path.join(self.tmpdir, 'input')
        self.output_path = os.path.join(self.tmpdir, 'output')
        self.keys_path = os.path.join(self.tmpdir, 'keys')
        self.cache_path = os.path.join(self.tmpdir, 'cache')
        self.exclude_file_path = os.path.join(self.tmpdir, 'excludes')
        os.environ['ATTIC_KEYS_DIR'] = self.keys_path
        os.environ['ATTIC_CACHE_DIR'] = self.cache_path
        os.mkdir(self.input_path)
        os.mkdir(self.output_path)
        os.mkdir(self.keys_path)
        os.mkdir(self.cache_path)
        with open(self.exclude_file_path, 'wb') as fd:
            fd.write(b'input/file2\n# A comment line, then a blank line\n\n')
        self._old_wd = os.getcwd()
        os.chdir(self.tmpdir)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        os.chdir(self._old_wd)

    def attic(self, *args, **kw):
        exit_code = kw.get('exit_code', 0)
        fork = kw.get('fork', False)
        if fork:
            try:
                output = subprocess.check_output((sys.executable, '-m', 'attic.archiver') + args)
                ret = 0
            except subprocess.CalledProcessError as e:
                output = e.output
                ret = e.returncode
            output = os.fsdecode(output)
            if ret != exit_code:
                print(output)
            self.assert_equal(exit_code, ret)
            return output
        args = list(args)
        stdout, stderr = sys.stdout, sys.stderr
        try:
            output = StringIO()
            sys.stdout = sys.stderr = output
            ret = self.archiver.run(args)
            sys.stdout, sys.stderr = stdout, stderr
            if ret != exit_code:
                print(output.getvalue())
            self.assert_equal(exit_code, ret)
            return output.getvalue()
        finally:
            sys.stdout, sys.stderr = stdout, stderr

    def create_src_archive(self, name):
        self.attic('create', self.repository_location + '::' + name, src_dir)


class ArchiverTestCase(ArchiverTestCaseBase):

    def create_regular_file(self, name, size=0, contents=None):
        filename = os.path.join(self.input_path, name)
        if not os.path.exists(os.path.dirname(filename)):
            os.makedirs(os.path.dirname(filename))
        with open(filename, 'wb') as fd:
            if contents is None:
                contents = b'X' * size
            fd.write(contents)

    def create_test_files(self):
        """Create a minimal test case including all supported file types
        """
        # File
        self.create_regular_file('empty', size=0)
        # 2600-01-01 > 2**64 ns
        os.utime('input/empty', (19880895600, 19880895600))
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('flagfile', size=1024)
        # Directory
        self.create_regular_file('dir2/file2', size=1024 * 80)
        # File owner
        os.chown('input/file1', 100, 200)
        # File mode
        os.chmod('input/file1', 0o7755)
        os.chmod('input/dir2', 0o555)
        # Block device
        os.mknod('input/bdev', 0o600 | stat.S_IFBLK, os.makedev(10, 20))
        # Char device
        os.mknod('input/cdev', 0o600 | stat.S_IFCHR, os.makedev(30, 40))
        # Hard link
        os.link(os.path.join(self.input_path, 'file1'),
                os.path.join(self.input_path, 'hardlink'))
        # Symlink
        os.symlink('somewhere', os.path.join(self.input_path, 'link1'))
        if xattr.is_enabled():
            xattr.setxattr(os.path.join(self.input_path, 'file1'), 'user.foo', b'bar')
            xattr.setxattr(os.path.join(self.input_path, 'link1'), 'user.foo_symlink', b'bar_symlink', follow_symlinks=False)
        # FIFO node
        os.mkfifo(os.path.join(self.input_path, 'fifo1'))
        if has_lchflags:
            os.lchflags(os.path.join(self.input_path, 'flagfile'), stat.UF_NODUMP)

    def test_basic_functionality(self):
        self.create_test_files()
        self.attic('init', self.repository_location)
        self.attic('create', self.repository_location + '::test', 'input')
        self.attic('create', self.repository_location + '::test.2', 'input')
        with changedir('output'):
            self.attic('extract', self.repository_location + '::test')
        self.assert_equal(len(self.attic('list', self.repository_location).splitlines()), 2)
        self.assert_equal(len(self.attic('list', self.repository_location + '::test').splitlines()), 11)
        self.assert_dirs_equal('input', 'output/input')
        info_output = self.attic('info', self.repository_location + '::test')
        self.assert_in('Number of files: 4', info_output)
        shutil.rmtree(self.cache_path)
        info_output2 = self.attic('info', self.repository_location + '::test')
        # info_output2 starts with some "initializing cache" text but should
        # end the same way as info_output
        assert info_output2.endswith(info_output)

    def test_strip_components(self):
        self.attic('init', self.repository_location)
        self.create_regular_file('dir/file')
        self.attic('create', self.repository_location + '::test', 'input')
        with changedir('output'):
            self.attic('extract', self.repository_location + '::test', '--strip-components', '3')
            self.assert_true(not os.path.exists('file'))
            with self.assert_creates_file('file'):
                self.attic('extract', self.repository_location + '::test', '--strip-components', '2')
            with self.assert_creates_file('dir/file'):
                self.attic('extract', self.repository_location + '::test', '--strip-components', '1')
            with self.assert_creates_file('input/dir/file'):
                self.attic('extract', self.repository_location + '::test', '--strip-components', '0')

    def test_extract_include_exclude(self):
        self.attic('init', self.repository_location)
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('file2', size=1024 * 80)
        self.create_regular_file('file3', size=1024 * 80)
        self.create_regular_file('file4', size=1024 * 80)
        self.attic('create', '--exclude=input/file4', self.repository_location + '::test', 'input')
        with changedir('output'):
            self.attic('extract', self.repository_location + '::test', 'input/file1', )
        self.assert_equal(sorted(os.listdir('output/input')), ['file1'])
        with changedir('output'):
            self.attic('extract', '--exclude=input/file2', self.repository_location + '::test')
        self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file3'])
        with changedir('output'):
            self.attic('extract', '--exclude-from=' + self.exclude_file_path, self.repository_location + '::test')
        self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file3'])

    def test_exclude_caches(self):
        self.attic('init', self.repository_location)
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('cache1/CACHEDIR.TAG', contents=b'Signature: 8a477f597d28d172789f06886806bc55 extra stuff')
        self.create_regular_file('cache2/CACHEDIR.TAG', contents=b'invalid signature')
        self.attic('create', '--exclude-caches', self.repository_location + '::test', 'input')
        with changedir('output'):
            self.attic('extract', self.repository_location + '::test')
        self.assert_equal(sorted(os.listdir('output/input')), ['cache2', 'file1'])
        self.assert_equal(sorted(os.listdir('output/input/cache2')), ['CACHEDIR.TAG'])

    def test_path_normalization(self):
        self.attic('init', self.repository_location)
        self.create_regular_file('dir1/dir2/file', size=1024 * 80)
        with changedir('input/dir1/dir2'):
            self.attic('create', self.repository_location + '::test', '../../../input/dir1/../dir1/dir2/..')
        output = self.attic('list', self.repository_location + '::test')
        self.assert_not_in('..', output)
        self.assert_in(' input/dir1/dir2/file', output)

    def test_repeated_files(self):
        self.create_regular_file('file1', size=1024 * 80)
        self.attic('init', self.repository_location)
        self.attic('create', self.repository_location + '::test', 'input', 'input')

    def test_overwrite(self):
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('dir2/file2', size=1024 * 80)
        self.attic('init', self.repository_location)
        self.attic('create', self.repository_location + '::test', 'input')
        # Overwriting regular files and directories should be supported
        os.mkdir('output/input')
        os.mkdir('output/input/file1')
        os.mkdir('output/input/dir2')
        with changedir('output'):
            self.attic('extract', self.repository_location + '::test')
        self.assert_dirs_equal('input', 'output/input')
        # But non-empty dirs should fail
        os.unlink('output/input/file1')
        os.mkdir('output/input/file1')
        os.mkdir('output/input/file1/dir')
        with changedir('output'):
            self.attic('extract', self.repository_location + '::test', exit_code=1)

    def test_delete(self):
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('dir2/file2', size=1024 * 80)
        self.attic('init', self.repository_location)
        self.attic('create', self.repository_location + '::test', 'input')
        self.attic('create', self.repository_location + '::test.2', 'input')
        self.attic('extract', '--dry-run', self.repository_location + '::test')
        self.attic('extract', '--dry-run', self.repository_location + '::test.2')
        self.attic('delete', self.repository_location + '::test')
        self.attic('extract', '--dry-run', self.repository_location + '::test.2')
        self.attic('delete', self.repository_location + '::test.2')
        # Make sure all data except the manifest has been deleted
        repository = Repository(self.repository_path)
        self.assert_equal(len(repository), 1)

    def test_corrupted_repository(self):
        self.attic('init', self.repository_location)
        self.create_src_archive('test')
        self.attic('extract', '--dry-run', self.repository_location + '::test')
        self.attic('check', self.repository_location)
        name = sorted(os.listdir(os.path.join(self.tmpdir, 'repository', 'data', '0')), reverse=True)[0]
        fd = open(os.path.join(self.tmpdir, 'repository', 'data', '0', name), 'r+')
        fd.seek(100)
        fd.write('XXXX')
        fd.close()
        self.attic('check', self.repository_location, exit_code=1)

    def test_readonly_repository(self):
        self.attic('init', self.repository_location)
        self.create_src_archive('test')
        os.system('chmod -R ugo-w ' + self.repository_path)
        try:
            self.attic('extract', '--dry-run', self.repository_location + '::test')
        finally:
            # Restore permissions so shutil.rmtree is able to delete it
            os.system('chmod -R u+w ' + self.repository_path)

    def test_cmdline_compatibility(self):
        self.create_regular_file('file1', size=1024 * 80)
        self.attic('init', self.repository_location)
        self.attic('create', self.repository_location + '::test', 'input')
        output = self.attic('verify', '-v', self.repository_location + '::test')
        self.assert_in('"attic verify" has been deprecated', output)
        output = self.attic('prune', self.repository_location, '--hourly=1')
        self.assert_in('"--hourly" has been deprecated. Use "--keep-hourly" instead', output)

    def test_prune_repository(self):
        self.attic('init', self.repository_location)
        self.attic('create', self.repository_location + '::test1', src_dir)
        self.attic('create', self.repository_location + '::test2', src_dir)
        output = self.attic('prune', '-v', '--dry-run', self.repository_location, '--keep-daily=2')
        self.assert_in('Keeping archive: test2', output)
        self.assert_in('Would prune: test1', output)
        output = self.attic('list', self.repository_location)
        self.assert_in('test1', output)
        self.assert_in('test2', output)
        self.attic('prune', self.repository_location, '--keep-daily=2')
        output = self.attic('list', self.repository_location)
        self.assert_not_in('test1', output)
        self.assert_in('test2', output)

    def test_usage(self):
        self.assert_raises(SystemExit, lambda: self.attic())
        self.assert_raises(SystemExit, lambda: self.attic('-h'))

    @unittest.skipUnless(has_llfuse, 'llfuse not installed')
    def test_fuse_mount_repository(self):
        mountpoint = os.path.join(self.tmpdir, 'mountpoint')
        os.mkdir(mountpoint)
        self.attic('init', self.repository_location)
        self.create_test_files()
        self.attic('create', self.repository_location + '::archive', 'input')
        self.attic('create', self.repository_location + '::archive2', 'input')
        try:
            self.attic('mount', self.repository_location, mountpoint, fork=True)
            self.wait_for_mount(mountpoint)
            self.assert_dirs_equal(self.input_path, os.path.join(mountpoint, 'archive', 'input'))
            self.assert_dirs_equal(self.input_path, os.path.join(mountpoint, 'archive2', 'input'))
        finally:
            if sys.platform.startswith('linux'):
                os.system('fusermount -u ' + mountpoint)
            else:
                os.system('umount ' + mountpoint)
            os.rmdir(mountpoint)
            # Give the daemon some time to exit
            time.sleep(.2)

    @unittest.skipUnless(has_llfuse, 'llfuse not installed')
    def test_fuse_mount_archive(self):
        mountpoint = os.path.join(self.tmpdir, 'mountpoint')
        os.mkdir(mountpoint)
        self.attic('init', self.repository_location)
        self.create_test_files()
        self.attic('create', self.repository_location + '::archive', 'input')
        try:
            self.attic('mount', self.repository_location + '::archive', mountpoint, fork=True)
            self.wait_for_mount(mountpoint)
            self.assert_dirs_equal(self.input_path, os.path.join(mountpoint, 'input'))
        finally:
            if sys.platform.startswith('linux'):
                os.system('fusermount -u ' + mountpoint)
            else:
                os.system('umount ' + mountpoint)
            os.rmdir(mountpoint)
            # Give the daemon some time to exit
            time.sleep(.2)

    def verify_aes_counter_uniqueness(self, method):
        seen = set()  # Chunks already seen
        used = set()  # counter values already used

        def verify_uniqueness():
            repository = Repository(self.repository_path)
            for key, _ in repository.open_index(repository.get_transaction_id()).iteritems():
                data = repository.get(key)
                hash = sha256(data).digest()
                if not hash in seen:
                    seen.add(hash)
                    num_blocks = num_aes_blocks(len(data) - 41)
                    nonce = bytes_to_long(data[33:41])
                    for counter in range(nonce, nonce + num_blocks):
                        self.assert_not_in(counter, used)
                        used.add(counter)

        self.create_test_files()
        os.environ['ATTIC_PASSPHRASE'] = 'passphrase'
        self.attic('init', '--encryption=' + method, self.repository_location)
        verify_uniqueness()
        self.attic('create', self.repository_location + '::test', 'input')
        verify_uniqueness()
        self.attic('create', self.repository_location + '::test.2', 'input')
        verify_uniqueness()
        self.attic('delete', self.repository_location + '::test.2')
        verify_uniqueness()
        self.assert_equal(used, set(range(len(used))))

    def test_aes_counter_uniqueness_keyfile(self):
        self.verify_aes_counter_uniqueness('keyfile')

    def test_aes_counter_uniqueness_passphrase(self):
        self.verify_aes_counter_uniqueness('passphrase')


class ArchiverCheckTestCase(ArchiverTestCaseBase):

    def setUp(self):
        super(ArchiverCheckTestCase, self).setUp()
        with patch.object(ChunkBuffer, 'BUFFER_SIZE', 10):
            self.attic('init', self.repository_location)
            self.create_src_archive('archive1')
            self.create_src_archive('archive2')

    def open_archive(self, name):
        repository = Repository(self.repository_path)
        manifest, key = Manifest.load(repository)
        archive = Archive(repository, key, manifest, name)
        return archive, repository

    def test_check_usage(self):
        output = self.attic('check', self.repository_location, exit_code=0)
        self.assert_in('Starting repository check', output)
        self.assert_in('Starting archive consistency check', output)
        output = self.attic('check', '--repository-only', self.repository_location, exit_code=0)
        self.assert_in('Starting repository check', output)
        self.assert_not_in('Starting archive consistency check', output)
        output = self.attic('check', '--archives-only', self.repository_location, exit_code=0)
        self.assert_not_in('Starting repository check', output)
        self.assert_in('Starting archive consistency check', output)

    def test_missing_file_chunk(self):
        archive, repository = self.open_archive('archive1')
        for item in archive.iter_items():
            if item[b'path'].endswith('testsuite/archiver.py'):
                repository.delete(item[b'chunks'][-1][0])
                break
        repository.commit()
        self.attic('check', self.repository_location, exit_code=1)
        self.attic('check', '--repair', self.repository_location, exit_code=0)
        self.attic('check', self.repository_location, exit_code=0)

    def test_missing_archive_item_chunk(self):
        archive, repository = self.open_archive('archive1')
        repository.delete(archive.metadata[b'items'][-5])
        repository.commit()
        self.attic('check', self.repository_location, exit_code=1)
        self.attic('check', '--repair', self.repository_location, exit_code=0)
        self.attic('check', self.repository_location, exit_code=0)

    def test_missing_archive_metadata(self):
        archive, repository = self.open_archive('archive1')
        repository.delete(archive.id)
        repository.commit()
        self.attic('check', self.repository_location, exit_code=1)
        self.attic('check', '--repair', self.repository_location, exit_code=0)
        self.attic('check', self.repository_location, exit_code=0)

    def test_missing_manifest(self):
        archive, repository = self.open_archive('archive1')
        repository.delete(Manifest.MANIFEST_ID)
        repository.commit()
        self.attic('check', self.repository_location, exit_code=1)
        output = self.attic('check', '--repair', self.repository_location, exit_code=0)
        self.assert_in('archive1', output)
        self.assert_in('archive2', output)
        self.attic('check', self.repository_location, exit_code=0)

    def test_extra_chunks(self):
        self.attic('check', self.repository_location, exit_code=0)
        repository = Repository(self.repository_location)
        repository.put(b'01234567890123456789012345678901', b'xxxx')
        repository.commit()
        repository.close()
        self.attic('check', self.repository_location, exit_code=1)
        self.attic('check', self.repository_location, exit_code=1)
        self.attic('check', '--repair', self.repository_location, exit_code=0)
        self.attic('check', self.repository_location, exit_code=0)
        self.attic('extract', '--dry-run', self.repository_location + '::archive1', exit_code=0)


class RemoteArchiverTestCase(ArchiverTestCase):
    prefix = '__testsuite__:'

    def test_remote_repo_restrict_to_path(self):
        self.attic('init', self.repository_location)
        path_prefix = os.path.dirname(self.repository_path)
        with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', '/foo']):
            self.assert_raises(PathNotAllowed, lambda: self.attic('init', self.repository_location + '_1'))
        with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', path_prefix]):
            self.attic('init', self.repository_location + '_2')
        with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', '/foo', '--restrict-to-path', path_prefix]):
            self.attic('init', self.repository_location + '_3')
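
# Editor's note: a standalone sketch of the counter-uniqueness property checked
# by verify_aes_counter_uniqueness() above. The 41-byte header layout (big-endian
# nonce in bytes 33..41) mirrors the slicing used in that test; the sample blobs
# and the block arithmetic here are illustrative assumptions.
if __name__ == '__main__':
    def ctr_values(blob):
        nonce = int.from_bytes(blob[33:41], 'big')
        num_blocks = (len(blob) - 41 + 15) // 16  # one counter per 16-byte AES block
        return range(nonce, nonce + num_blocks)

    used = set()
    chunk_a = bytes(41) + b'x' * 20                           # nonce 0, 2 blocks
    chunk_b = bytes(33) + (2).to_bytes(8, 'big') + b'y' * 16  # nonce 2, 1 block
    for blob in (chunk_a, chunk_b):
        for counter in ctr_values(blob):
            assert counter not in used, 'AES-CTR counter reused!'
            used.add(counter)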
./CrossVul/dataset_final_sorted/CWE-264/py/bad_1622_3
crossvul-python_data_good_3632_1
# Copyright 2011 OpenStack LLC.
# Copyright 2012 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""The security groups extension."""

import urllib
from xml.dom import minidom

from webob import exc
import webob

from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import quota
from nova import utils


LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
authorize = extensions.extension_authorizer('compute', 'security_groups')


def make_rule(elem):
    elem.set('id')
    elem.set('parent_group_id')

    proto = xmlutil.SubTemplateElement(elem, 'ip_protocol')
    proto.text = 'ip_protocol'

    from_port = xmlutil.SubTemplateElement(elem, 'from_port')
    from_port.text = 'from_port'

    to_port = xmlutil.SubTemplateElement(elem, 'to_port')
    to_port.text = 'to_port'

    group = xmlutil.SubTemplateElement(elem, 'group', selector='group')
    name = xmlutil.SubTemplateElement(group, 'name')
    name.text = 'name'
    tenant_id = xmlutil.SubTemplateElement(group, 'tenant_id')
    tenant_id.text = 'tenant_id'

    ip_range = xmlutil.SubTemplateElement(elem, 'ip_range',
                                          selector='ip_range')
    cidr = xmlutil.SubTemplateElement(ip_range, 'cidr')
    cidr.text = 'cidr'


def make_sg(elem):
    elem.set('id')
    elem.set('tenant_id')
    elem.set('name')

    desc = xmlutil.SubTemplateElement(elem, 'description')
    desc.text = 'description'

    rules = xmlutil.SubTemplateElement(elem, 'rules')
    rule = xmlutil.SubTemplateElement(rules, 'rule', selector='rules')
    make_rule(rule)


sg_nsmap = {None: wsgi.XMLNS_V11}


class SecurityGroupRuleTemplate(xmlutil.TemplateBuilder):
    def construct(self):
        root = xmlutil.TemplateElement('security_group_rule',
                                       selector='security_group_rule')
        make_rule(root)
        return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap)


class SecurityGroupTemplate(xmlutil.TemplateBuilder):
    def construct(self):
        root = xmlutil.TemplateElement('security_group',
                                       selector='security_group')
        make_sg(root)
        return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap)


class SecurityGroupsTemplate(xmlutil.TemplateBuilder):
    def construct(self):
        root = xmlutil.TemplateElement('security_groups')
        elem = xmlutil.SubTemplateElement(root, 'security_group',
                                          selector='security_groups')
        make_sg(elem)
        return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap)


class SecurityGroupXMLDeserializer(wsgi.MetadataXMLDeserializer):
    """
    Deserializer to handle xml-formatted security group requests.
    """
    def default(self, string):
        """Deserialize an xml-formatted security group create request"""
        dom = minidom.parseString(string)
        security_group = {}
        sg_node = self.find_first_child_named(dom, 'security_group')
        if sg_node is not None:
            if sg_node.hasAttribute('name'):
                security_group['name'] = sg_node.getAttribute('name')
            desc_node = self.find_first_child_named(sg_node, "description")
            if desc_node:
                security_group['description'] = self.extract_text(desc_node)
        return {'body': {'security_group': security_group}}


class SecurityGroupRulesXMLDeserializer(wsgi.MetadataXMLDeserializer):
    """
    Deserializer to handle xml-formatted security group requests.
    """

    def default(self, string):
        """Deserialize an xml-formatted security group create request"""
        dom = minidom.parseString(string)
        security_group_rule = self._extract_security_group_rule(dom)
        return {'body': {'security_group_rule': security_group_rule}}

    def _extract_security_group_rule(self, node):
        """Marshal the security group rule attribute of a parsed request"""
        sg_rule = {}
        sg_rule_node = self.find_first_child_named(node,
                                                   'security_group_rule')
        if sg_rule_node is not None:
            ip_protocol_node = self.find_first_child_named(sg_rule_node,
                                                           "ip_protocol")
            if ip_protocol_node is not None:
                sg_rule['ip_protocol'] = self.extract_text(ip_protocol_node)

            from_port_node = self.find_first_child_named(sg_rule_node,
                                                         "from_port")
            if from_port_node is not None:
                sg_rule['from_port'] = self.extract_text(from_port_node)

            to_port_node = self.find_first_child_named(sg_rule_node,
                                                       "to_port")
            if to_port_node is not None:
                sg_rule['to_port'] = self.extract_text(to_port_node)

            parent_group_id_node = self.find_first_child_named(
                sg_rule_node, "parent_group_id")
            if parent_group_id_node is not None:
                sg_rule['parent_group_id'] = self.extract_text(
                    parent_group_id_node)

            group_id_node = self.find_first_child_named(sg_rule_node,
                                                        "group_id")
            if group_id_node is not None:
                sg_rule['group_id'] = self.extract_text(group_id_node)

            cidr_node = self.find_first_child_named(sg_rule_node, "cidr")
            if cidr_node is not None:
                sg_rule['cidr'] = self.extract_text(cidr_node)

        return sg_rule


class SecurityGroupControllerBase(object):
    """Base class for Security Group controllers."""

    def __init__(self):
        self.compute_api = compute.API()
        self.sgh = utils.import_object(FLAGS.security_group_handler)

    def _format_security_group_rule(self, context, rule):
        sg_rule = {}
        sg_rule['id'] = rule.id
        sg_rule['parent_group_id'] = rule.parent_group_id
        sg_rule['ip_protocol'] = rule.protocol
        sg_rule['from_port'] = rule.from_port
        sg_rule['to_port'] = rule.to_port
        sg_rule['group'] = {}
        sg_rule['ip_range'] = {}
        if rule.group_id:
            source_group = db.security_group_get(context, rule.group_id)
            sg_rule['group'] = {'name': source_group.name,
                                'tenant_id': source_group.project_id}
        else:
            sg_rule['ip_range'] = {'cidr': rule.cidr}
        return sg_rule

    def _format_security_group(self, context, group):
        security_group = {}
        security_group['id'] = group.id
        security_group['description'] = group.description
        security_group['name'] = group.name
        security_group['tenant_id'] = group.project_id
        security_group['rules'] = []
        for rule in group.rules:
            security_group['rules'] += [self._format_security_group_rule(
                context, rule)]
        return security_group


class SecurityGroupController(SecurityGroupControllerBase):
    """The Security group API controller for the OpenStack API."""

    def _get_security_group(self, context, id):
        try:
            id = int(id)
            security_group = db.security_group_get(context, id)
        except ValueError:
            msg = _("Security group id should be integer")
            raise exc.HTTPBadRequest(explanation=msg)
        except exception.NotFound as exp:
            raise exc.HTTPNotFound(explanation=unicode(exp))
        return security_group

    @wsgi.serializers(xml=SecurityGroupTemplate)
    def show(self, req, id):
        """Return data about the given security group."""
        context = req.environ['nova.context']
        authorize(context)
        security_group = self._get_security_group(context, id)
        return {'security_group': self._format_security_group(
            context, security_group)}

    def delete(self, req, id):
        """Delete a security group."""
        context = req.environ['nova.context']
        authorize(context)
        security_group = self._get_security_group(context, id)
        if db.security_group_in_use(context, security_group.id):
            msg = _("Security group is still in use")
            raise exc.HTTPBadRequest(explanation=msg)
        LOG.audit(_("Delete security group %s"), id, context=context)
        db.security_group_destroy(context, security_group.id)
        self.sgh.trigger_security_group_destroy_refresh(
            context, security_group.id)
        return webob.Response(status_int=202)

    @wsgi.serializers(xml=SecurityGroupsTemplate)
    def index(self, req):
        """Returns a list of security groups"""
        context = req.environ['nova.context']
        authorize(context)
        self.compute_api.ensure_default_security_group(context)
        groups = db.security_group_get_by_project(context,
                                                  context.project_id)
        limited_list = common.limited(groups, req)
        result = [self._format_security_group(context, group)
                  for group in limited_list]
        return {'security_groups':
                list(sorted(result, key=lambda k: (k['tenant_id'], k['name'])))}

    @wsgi.serializers(xml=SecurityGroupTemplate)
    @wsgi.deserializers(xml=SecurityGroupXMLDeserializer)
    def create(self, req, body):
        """Creates a new security group."""
        context = req.environ['nova.context']
        authorize(context)
        if not body:
            raise exc.HTTPUnprocessableEntity()

        security_group = body.get('security_group', None)
        if security_group is None:
            raise exc.HTTPUnprocessableEntity()

        group_name = security_group.get('name', None)
        group_description = security_group.get('description', None)
        self._validate_security_group_property(group_name, "name")
        self._validate_security_group_property(group_description,
                                               "description")
        group_name = group_name.strip()
        group_description = group_description.strip()

        if quota.allowed_security_groups(context, 1) < 1:
            msg = _("Quota exceeded, too many security groups.")
            raise exc.HTTPBadRequest(explanation=msg)

        LOG.audit(_("Create Security Group %s"), group_name, context=context)
        self.compute_api.ensure_default_security_group(context)
        if db.security_group_exists(context, context.project_id, group_name):
            msg = _('Security group %s already exists') % group_name
            raise exc.HTTPBadRequest(explanation=msg)

        group = {'user_id': context.user_id,
                 'project_id': context.project_id,
                 'name': group_name,
                 'description': group_description}
        group_ref = db.security_group_create(context, group)
        self.sgh.trigger_security_group_create_refresh(context, group)

        return {'security_group': self._format_security_group(context,
                                                               group_ref)}

    def _validate_security_group_property(self, value, typ):
        """
        typ will be either 'name' or 'description',
        depending on the caller
        """
        try:
            val = value.strip()
        except AttributeError:
            msg = _("Security group %s is not a string or unicode") % typ
            raise exc.HTTPBadRequest(explanation=msg)
        if not val:
            msg = _("Security group %s cannot be empty.") % typ
            raise exc.HTTPBadRequest(explanation=msg)
        if len(val) > 255:
            msg = _("Security group %s should not be greater "
                    "than 255 characters.") % typ
            raise exc.HTTPBadRequest(explanation=msg)


class SecurityGroupRulesController(SecurityGroupControllerBase):

    @wsgi.serializers(xml=SecurityGroupRuleTemplate)
    @wsgi.deserializers(xml=SecurityGroupRulesXMLDeserializer)
    def create(self, req, body):
        context = req.environ['nova.context']
        authorize(context)

        if not body:
            raise exc.HTTPUnprocessableEntity()

        if not 'security_group_rule' in body:
            raise exc.HTTPUnprocessableEntity()

        self.compute_api.ensure_default_security_group(context)

        sg_rule = body['security_group_rule']
        parent_group_id = sg_rule.get('parent_group_id', None)
        try:
            parent_group_id = int(parent_group_id)
            security_group = db.security_group_get(context, parent_group_id)
        except ValueError:
            msg = _("Parent group id is not integer")
            raise exc.HTTPBadRequest(explanation=msg)
        except exception.NotFound as exp:
            msg = _("Security group (%s) not found") % parent_group_id
            raise exc.HTTPNotFound(explanation=msg)

        msg = _("Authorize security group ingress %s")
        LOG.audit(msg, security_group['name'], context=context)

        try:
            values = self._rule_args_to_dict(context,
                              to_port=sg_rule.get('to_port'),
                              from_port=sg_rule.get('from_port'),
                              parent_group_id=sg_rule.get('parent_group_id'),
                              ip_protocol=sg_rule.get('ip_protocol'),
                              cidr=sg_rule.get('cidr'),
                              group_id=sg_rule.get('group_id'))
        except Exception as exp:
            raise exc.HTTPBadRequest(explanation=unicode(exp))

        if values is None:
            msg = _("Not enough parameters to build a "
                    "valid rule.")
            raise exc.HTTPBadRequest(explanation=msg)

        values['parent_group_id'] = security_group.id

        if self._security_group_rule_exists(security_group, values):
            msg = _('This rule already exists in group %s') % parent_group_id
            raise exc.HTTPBadRequest(explanation=msg)

        allowed = quota.allowed_security_group_rules(context,
                                                     parent_group_id,
                                                     1)
        if allowed < 1:
            msg = _("Quota exceeded, too many security group rules.")
            raise exc.HTTPBadRequest(explanation=msg)

        security_group_rule = db.security_group_rule_create(context, values)
        self.sgh.trigger_security_group_rule_create_refresh(
            context, [security_group_rule['id']])
        self.compute_api.trigger_security_group_rules_refresh(context,
                                      security_group_id=security_group['id'])

        return {"security_group_rule": self._format_security_group_rule(
            context, security_group_rule)}

    def _security_group_rule_exists(self, security_group, values):
        """Indicates whether the specified rule values are already
           defined in the given security group.
        """
        for rule in security_group.rules:
            is_duplicate = True
            keys = ('group_id', 'cidr', 'from_port', 'to_port', 'protocol')
            for key in keys:
                if rule.get(key) != values.get(key):
                    is_duplicate = False
                    break
            if is_duplicate:
                return True
        return False

    def _rule_args_to_dict(self, context, to_port=None, from_port=None,
                           parent_group_id=None, ip_protocol=None,
                           cidr=None, group_id=None):
        values = {}

        if group_id is not None:
            try:
                parent_group_id = int(parent_group_id)
                group_id = int(group_id)
            except ValueError:
                msg = _("Parent or group id is not integer")
                raise exception.InvalidInput(reason=msg)

            values['group_id'] = group_id
            # check if groupId exists
            db.security_group_get(context, group_id)
        elif cidr:
            # If this fails, it throws an exception. This is what we want.
            try:
                cidr = urllib.unquote(cidr).decode()
            except Exception:
                raise exception.InvalidCidr(cidr=cidr)

            if not utils.is_valid_cidr(cidr):
                # Raise exception for non-valid address
                raise exception.InvalidCidr(cidr=cidr)

            values['cidr'] = cidr
        else:
            values['cidr'] = '0.0.0.0/0'

        if group_id:
            # Open everything if an explicit port range or type/code are not
            # specified, but only if a source group was specified.
            ip_proto_upper = ip_protocol.upper() if ip_protocol else ''
            if (ip_proto_upper == 'ICMP' and
                from_port is None and to_port is None):
                from_port = -1
                to_port = -1
            elif (ip_proto_upper in ['TCP', 'UDP'] and
                  from_port is None and to_port is None):
                from_port = 1
                to_port = 65535

        if ip_protocol and from_port is not None and to_port is not None:
            ip_protocol = str(ip_protocol)
            try:
                from_port = int(from_port)
                to_port = int(to_port)
            except ValueError:
                if ip_protocol.upper() == 'ICMP':
                    raise exception.InvalidInput(reason="Type and"
                           " Code must be integers for ICMP protocol type")
                else:
                    raise exception.InvalidInput(reason="To and From ports "
                           "must be integers")

            if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']:
                raise exception.InvalidIpProtocol(protocol=ip_protocol)

            # Verify that from_port must always be less than
            # or equal to to_port
            if (ip_protocol.upper() in ['TCP', 'UDP'] and
                from_port > to_port):
                raise exception.InvalidPortRange(from_port=from_port,
                      to_port=to_port, msg="Former value cannot"
                                           " be greater than the latter")

            # Verify valid TCP, UDP port ranges
            if (ip_protocol.upper() in ['TCP', 'UDP'] and
                (from_port < 1 or to_port > 65535)):
                raise exception.InvalidPortRange(from_port=from_port,
                      to_port=to_port, msg="Valid TCP ports should"
                                           " be between 1-65535")

            # Verify ICMP type and code
            if (ip_protocol.upper() == "ICMP" and
                (from_port < -1 or from_port > 255 or
                 to_port < -1 or to_port > 255)):
                raise exception.InvalidPortRange(from_port=from_port,
                      to_port=to_port, msg="For ICMP, the"
                                           " type:code must be valid")

            values['protocol'] = ip_protocol
            values['from_port'] = from_port
            values['to_port'] = to_port
        else:
            # If cidr based filtering, protocol and ports are mandatory
            if 'cidr' in values:
                return None

        return values

    def delete(self, req, id):
        context = req.environ['nova.context']
        authorize(context)

        self.compute_api.ensure_default_security_group(context)
        try:
            id = int(id)
            rule = db.security_group_rule_get(context, id)
        except ValueError:
            msg = _("Rule id is not integer")
            raise exc.HTTPBadRequest(explanation=msg)
        except exception.NotFound:
            msg = _("Rule (%s) not found") % id
            raise exc.HTTPNotFound(explanation=msg)

        group_id = rule.parent_group_id
        self.compute_api.ensure_default_security_group(context)
        security_group = db.security_group_get(context, group_id)

        msg = _("Revoke security group ingress %s")
        LOG.audit(msg, security_group['name'], context=context)

        db.security_group_rule_destroy(context, rule['id'])
        self.sgh.trigger_security_group_rule_destroy_refresh(
            context, [rule['id']])
        self.compute_api.trigger_security_group_rules_refresh(context,
                                      security_group_id=security_group['id'])

        return webob.Response(status_int=202)


class ServerSecurityGroupController(SecurityGroupControllerBase):

    @wsgi.serializers(xml=SecurityGroupsTemplate)
    def index(self, req, server_id):
        """Returns a list of security groups for the given instance."""
        context = req.environ['nova.context']
        authorize(context)

        self.compute_api.ensure_default_security_group(context)

        try:
            instance = self.compute_api.get(context, server_id)
            groups = db.security_group_get_by_instance(context,
                                                       instance['id'])
        except exception.ApiError, e:
            raise webob.exc.HTTPBadRequest(explanation=e.message)
        except exception.NotAuthorized, e:
            raise webob.exc.HTTPUnauthorized()

        result = [self._format_security_group(context, group)
                  for group in groups]

        return {'security_groups':
                list(sorted(result, key=lambda k: (k['tenant_id'], k['name'])))}


class SecurityGroupActionController(wsgi.Controller):
    def __init__(self, *args, **kwargs):
        super(SecurityGroupActionController, self).__init__(*args, **kwargs)
        self.compute_api = compute.API()
        self.sgh = utils.import_object(FLAGS.security_group_handler)

    @wsgi.action('addSecurityGroup')
    def _addSecurityGroup(self, req, id, body):
        context = req.environ['nova.context']
        authorize(context)

        try:
            body = body['addSecurityGroup']
            group_name = body['name']
        except TypeError:
            msg = _("Missing parameter dict")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        except KeyError:
            msg = _("Security group not specified")
            raise webob.exc.HTTPBadRequest(explanation=msg)

        if not group_name or group_name.strip() == '':
            msg = _("Security group name cannot be empty")
            raise webob.exc.HTTPBadRequest(explanation=msg)

        try:
            instance = self.compute_api.get(context, id)
            self.compute_api.add_security_group(context, instance,
                                                group_name)
            self.sgh.trigger_instance_add_security_group_refresh(
                context, instance, group_name)
        except exception.SecurityGroupNotFound as exp:
            raise exc.HTTPNotFound(explanation=unicode(exp))
        except exception.InstanceNotFound as exp:
            raise exc.HTTPNotFound(explanation=unicode(exp))
        except exception.Invalid as exp:
            raise exc.HTTPBadRequest(explanation=unicode(exp))

        return webob.Response(status_int=202)

    @wsgi.action('removeSecurityGroup')
    def _removeSecurityGroup(self, req, id, body):
        context = req.environ['nova.context']
        authorize(context)

        try:
            body = body['removeSecurityGroup']
            group_name = body['name']
        except TypeError:
            msg = _("Missing parameter dict")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        except KeyError:
            msg = _("Security group not specified")
            raise webob.exc.HTTPBadRequest(explanation=msg)

        if not group_name or group_name.strip() == '':
            msg = _("Security group name cannot be empty")
            raise webob.exc.HTTPBadRequest(explanation=msg)

        try:
            instance = self.compute_api.get(context, id)
            self.compute_api.remove_security_group(context, instance,
                                                   group_name)
            self.sgh.trigger_instance_remove_security_group_refresh(
                context, instance, group_name)
        except exception.SecurityGroupNotFound as exp:
            raise exc.HTTPNotFound(explanation=unicode(exp))
        except exception.InstanceNotFound as exp:
            raise exc.HTTPNotFound(explanation=unicode(exp))
        except exception.Invalid as exp:
            raise exc.HTTPBadRequest(explanation=unicode(exp))

        return webob.Response(status_int=202)


class Security_groups(extensions.ExtensionDescriptor):
    """Security group support"""

    name = "SecurityGroups"
    alias = "security_groups"
    namespace = "http://docs.openstack.org/compute/ext/securitygroups/api/v1.1"
    updated = "2011-07-21T00:00:00+00:00"

    def get_controller_extensions(self):
        controller = SecurityGroupActionController()
        extension = extensions.ControllerExtension(self, 'servers', controller)
        return [extension]

    def get_resources(self):
        resources = []

        res = extensions.ResourceExtension('os-security-groups',
                                controller=SecurityGroupController())
        resources.append(res)

        res = extensions.ResourceExtension('os-security-group-rules',
                                controller=SecurityGroupRulesController())
        resources.append(res)

        res = extensions.ResourceExtension(
            'os-security-groups',
            controller=ServerSecurityGroupController(),
            parent=dict(member_name='server', collection_name='servers'))
        resources.append(res)

        return resources
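
# Editor's sketch (standalone, not part of this extension): the port-range
# rules that _rule_args_to_dict enforces above, distilled into one pure
# function so the TCP/UDP vs. ICMP cases are easy to compare side by side.
# The function name and structure are illustrative assumptions, not nova API.
def _demo_valid_port_range(ip_protocol, from_port, to_port):
    proto = ip_protocol.upper()
    if proto in ('TCP', 'UDP'):
        # from_port must not exceed to_port; both must be within 1-65535
        return 1 <= from_port <= to_port <= 65535
    if proto == 'ICMP':
        # ICMP reuses the ports as type:code; -1 means "all", max value is 255
        return -1 <= from_port <= 255 and -1 <= to_port <= 255
    return False


if __name__ == '__main__':
    assert _demo_valid_port_range('tcp', 22, 22)
    assert not _demo_valid_port_range('udp', 80, 79)    # from_port > to_port
    assert _demo_valid_port_range('icmp', -1, -1)       # all ICMP types/codes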
./CrossVul/dataset_final_sorted/CWE-264/py/good_3632_1
crossvul-python_data_good_3772_3
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010-2011 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Common utilities used in testing"""

import errno
import functools
import os
import random
import socket
import StringIO
import subprocess
import unittest

import nose.plugins.skip

from glance.common import config
from glance.common import utils
from glance.common import wsgi
from glance import context
from glance.openstack.common import cfg

CONF = cfg.CONF


def get_isolated_test_env():
    """
    Returns a tuple of (test_id, test_dir) that is unique
    for an isolated test environment. Also ensure the test_dir
    is created.
    """
    test_id = random.randint(0, 100000)
    test_dir = os.path.join("/", "tmp", "test.%d" % test_id)
    utils.safe_mkdirs(test_dir)
    return test_id, test_dir


class BaseTestCase(unittest.TestCase):

    def setUp(self):
        super(BaseTestCase, self).setUp()
        # NOTE(bcwaldon): parse_args has to be called to register certain
        # command-line options - specifically we need config_dir for
        # the following policy tests
        config.parse_args(args=[])

    def tearDown(self):
        super(BaseTestCase, self).tearDown()
        CONF.reset()

    def config(self, **kw):
        """
        Override some configuration values.

        The keyword arguments are the names of configuration options to
        override and their values.

        If a group argument is supplied, the overrides are applied to
        the specified configuration option group.

        All overrides are automatically cleared at the end of the current
        test by the tearDown() method.
        """
        group = kw.pop('group', None)
        for k, v in kw.iteritems():
            CONF.set_override(k, v, group)


class skip_test(object):
    """Decorator that skips a test."""
    def __init__(self, msg):
        self.message = msg

    def __call__(self, func):
        def _skipper(*args, **kw):
            """Wrapped skipper function."""
            raise nose.SkipTest(self.message)
        _skipper.__name__ = func.__name__
        _skipper.__doc__ = func.__doc__
        return _skipper


class skip_if(object):
    """Decorator that skips a test if condition is true."""
    def __init__(self, condition, msg):
        self.condition = condition
        self.message = msg

    def __call__(self, func):
        def _skipper(*args, **kw):
            """Wrapped skipper function."""
            if self.condition:
                raise nose.SkipTest(self.message)
            func(*args, **kw)
        _skipper.__name__ = func.__name__
        _skipper.__doc__ = func.__doc__
        return _skipper


class skip_unless(object):
    """Decorator that skips a test if condition is not true."""
    def __init__(self, condition, msg):
        self.condition = condition
        self.message = msg

    def __call__(self, func):
        def _skipper(*args, **kw):
            """Wrapped skipper function."""
            if not self.condition:
                raise nose.SkipTest(self.message)
            func(*args, **kw)
        _skipper.__name__ = func.__name__
        _skipper.__doc__ = func.__doc__
        return _skipper


class requires(object):
    """Decorator that initiates additional test setup/teardown."""
    def __init__(self, setup=None, teardown=None):
        self.setup = setup
        self.teardown = teardown

    def __call__(self, func):
        def _runner(*args, **kw):
            if self.setup:
                self.setup(args[0])
            func(*args, **kw)
            if self.teardown:
                self.teardown(args[0])
        _runner.__name__ = func.__name__
        _runner.__doc__ = func.__doc__
        return _runner


class depends_on_exe(object):
    """Decorator to skip test if an executable is unavailable"""
    def __init__(self, exe):
        self.exe = exe

    def __call__(self, func):
        def _runner(*args, **kw):
            cmd = 'which %s' % self.exe
            exitcode, out, err = execute(cmd, raise_error=False)
            if exitcode != 0:
                args[0].disabled_message = 'test requires exe: %s' % self.exe
                args[0].disabled = True
            func(*args, **kw)
        _runner.__name__ = func.__name__
        _runner.__doc__ = func.__doc__
        return _runner


def skip_if_disabled(func):
    """Decorator that skips a test if test case is disabled."""
    @functools.wraps(func)
    def wrapped(*a, **kwargs):
        func.__test__ = False
        test_obj = a[0]
        message = getattr(test_obj, 'disabled_message', 'Test disabled')
        if getattr(test_obj, 'disabled', False):
            raise nose.SkipTest(message)
        func(*a, **kwargs)
    return wrapped


def execute(cmd, raise_error=True, no_venv=False, exec_env=None,
            expect_exit=True, expected_exitcode=0, context=None):
    """
    Executes a command in a subprocess. Returns a tuple
    of (exitcode, out, err), where out is the string output
    from stdout and err is the string output from stderr when
    executing the command.

    :param cmd: Command string to execute
    :param raise_error: If returncode is not 0 (success), then
                        raise a RuntimeError (default: True)
    :param no_venv: Disable the virtual environment
    :param exec_env: Optional dictionary of additional environment
                     variables; values may be callables, which will
                     be passed the current value of the named
                     environment variable
    :param expect_exit: Optional flag true iff timely exit is expected
    :param expected_exitcode: expected exitcode from the launcher
    :param context: additional context for error message
    """

    env = os.environ.copy()
    if exec_env is not None:
        for env_name, env_val in exec_env.items():
            if callable(env_val):
                env[env_name] = env_val(env.get(env_name))
            else:
                env[env_name] = env_val

    # If we're asked to omit the virtualenv, and if one is set up,
    # restore the various environment variables
    if no_venv and 'VIRTUAL_ENV' in env:
        # Clip off the first element of PATH
        env['PATH'] = env['PATH'].split(os.pathsep, 1)[-1]
        del env['VIRTUAL_ENV']

    # Make sure that we use the programs in the
    # current source directory's bin/ directory.
    path_ext = [os.path.join(os.getcwd(), 'bin')]

    # Also jack in the path cmd comes from, if it's absolute
    executable = cmd.split()[0]
    if os.path.isabs(executable):
        path_ext.append(os.path.dirname(executable))

    env['PATH'] = ':'.join(path_ext) + ':' + env['PATH']
    process = subprocess.Popen(cmd,
                               shell=True,
                               stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               env=env)
    if expect_exit:
        result = process.communicate()
        (out, err) = result
        exitcode = process.returncode
    else:
        out = ''
        err = ''
        exitcode = 0

    if exitcode != expected_exitcode and raise_error:
        msg = "Command %(cmd)s did not succeed. Returned an exit "\
              "code of %(exitcode)d."\
              "\n\nSTDOUT: %(out)s"\
              "\n\nSTDERR: %(err)s" % locals()
        if context:
            msg += "\n\nCONTEXT: %s" % context
        raise RuntimeError(msg)
    return exitcode, out, err


def find_executable(cmdname):
    """
    Searches the path for a given cmdname. Returns an absolute
    filename if an executable with the given name exists in the path,
    or None if one does not.

    :param cmdname: The bare name of the executable to search for
    """

    # Keep an eye out for the possibility of an absolute pathname
    if os.path.isabs(cmdname):
        return cmdname

    # Get a list of the directories to search
    path = ([os.path.join(os.getcwd(), 'bin')] +
            os.environ['PATH'].split(os.pathsep))

    # Search through each in turn
    for elem in path:
        full_path = os.path.join(elem, cmdname)
        if os.access(full_path, os.X_OK):
            return full_path

    # No dice...
    return None


def get_unused_port():
    """
    Returns an unused port on localhost.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind(('localhost', 0))
    addr, port = s.getsockname()
    s.close()
    return port


def xattr_writes_supported(path):
    """
    Returns True if we can write a file to the supplied
    path and subsequently write an xattr to that file.
    """
    try:
        import xattr
    except ImportError:
        return False

    def set_xattr(path, key, value):
        xattr.setxattr(path, "user.%s" % key, str(value))

    # We do a quick attempt to write a user xattr to a temporary file
    # to check that the filesystem is even enabled to support xattrs
    fake_filepath = os.path.join(path, 'testing-checkme')
    result = True
    with open(fake_filepath, 'wb') as fake_file:
        fake_file.write("XXX")
        fake_file.flush()
    try:
        set_xattr(fake_filepath, 'hits', '1')
    except IOError, e:
        if e.errno == errno.EOPNOTSUPP:
            result = False
    else:
        # Cleanup after ourselves...
        if os.path.exists(fake_filepath):
            os.unlink(fake_filepath)

    return result


def minimal_headers(name, public=True):
    headers = {'Content-Type': 'application/octet-stream',
               'X-Image-Meta-Name': name,
               'X-Image-Meta-disk_format': 'raw',
               'X-Image-Meta-container_format': 'ovf',
               }
    if public:
        headers['X-Image-Meta-Is-Public'] = 'True'
    return headers


def minimal_add_command(port, name, suffix='', public=True):
    visibility = 'is_public=True' if public else ''
    return ("bin/glance --port=%d add %s"
            " disk_format=raw container_format=ovf"
            " name=%s %s" % (port, visibility, name, suffix))


class FakeAuthMiddleware(wsgi.Middleware):

    def __init__(self, app, is_admin=False):
        super(FakeAuthMiddleware, self).__init__(app)
        self.is_admin = is_admin

    def process_request(self, req):
        auth_tok = req.headers.get('X-Auth-Token')
        user = None
        tenant = None
        roles = []
        if auth_tok:
            user, tenant, role = auth_tok.split(':')
            roles = [role]
            req.headers['X-User-Id'] = user
            req.headers['X-Tenant-Id'] = tenant
            req.headers['X-Roles'] = role
            req.headers['X-Identity-Status'] = 'Confirmed'
        kwargs = {
            'user': user,
            'tenant': tenant,
            'roles': roles,
            'is_admin': self.is_admin,
            'auth_tok': auth_tok,
        }
        req.context = context.RequestContext(**kwargs)


class FakeHTTPResponse(object):
    def __init__(self, status=200, headers=None, data=None, *args, **kwargs):
        data = data or 'I am a teapot, short and stout\n'
        self.data = StringIO.StringIO(data)
        self.read = self.data.read
        self.status = status
        self.headers = headers or {'content-length': len(data)}

    def getheader(self, name, default=None):
        return self.headers.get(name.lower(), default)
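
# Editor's demo: a minimal use of the execute() helper above. The command and
# the expected output are assumptions for illustration; execute() raises
# RuntimeError on an unexpected exit code, so a plain success path is all this
# needs to show.
if __name__ == '__main__':
    exitcode, out, err = execute('echo hello')
    assert exitcode == 0
    assert out.strip() == 'hello'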
./CrossVul/dataset_final_sorted/CWE-264/py/good_3772_3
crossvul-python_data_good_3772_1
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010-2011 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Stubouts, mocks and fixtures for the test suite"""

import os

try:
    import sendfile
    SENDFILE_SUPPORTED = True
except ImportError:
    SENDFILE_SUPPORTED = False

import routes
import webob

from glance.api.middleware import context
from glance.api.v1 import router
import glance.common.client
from glance.registry.api import v1 as rserver
from glance.tests import utils


VERBOSE = False
DEBUG = False


class FakeRegistryConnection(object):

    def __init__(self, *args, **kwargs):
        pass

    def connect(self):
        return True

    def close(self):
        return True

    def request(self, method, url, body=None, headers=None):
        self.req = webob.Request.blank("/" + url.lstrip("/"))
        self.req.method = method
        if headers:
            self.req.headers = headers
        if body:
            self.req.body = body

    def getresponse(self):
        mapper = routes.Mapper()
        server = rserver.API(mapper)
        # NOTE(markwash): we need to pass through context auth information
        # if we have it.
        if 'X-Auth-Token' in self.req.headers:
            api = utils.FakeAuthMiddleware(server)
        else:
            api = context.UnauthenticatedContextMiddleware(server)
        webob_res = self.req.get_response(api)
        return utils.FakeHTTPResponse(status=webob_res.status_int,
                                      headers=webob_res.headers,
                                      data=webob_res.body)


def stub_out_registry_and_store_server(stubs, base_dir):
    """
    Mocks calls to 127.0.0.1 on 9191 and 9292 for testing so
    that a real Glance server does not need to be up and
    running
    """

    class FakeSocket(object):

        def __init__(self, *args, **kwargs):
            pass

        def fileno(self):
            return 42

    class FakeSendFile(object):

        def __init__(self, req):
            self.req = req

        def sendfile(self, o, i, offset, nbytes):
            os.lseek(i, offset, os.SEEK_SET)
            prev_len = len(self.req.body)
            self.req.body += os.read(i, nbytes)
            return len(self.req.body) - prev_len

    class FakeGlanceConnection(object):

        def __init__(self, *args, **kwargs):
            self.sock = FakeSocket()
            self.stub_force_sendfile = kwargs.get('stub_force_sendfile',
                                                  SENDFILE_SUPPORTED)

        def connect(self):
            return True

        def close(self):
            return True

        def _clean_url(self, url):
            # TODO(bcwaldon): Fix the hack that strips off v1
            return url.replace('/v1', '', 1) if url.startswith('/v1') else url

        def putrequest(self, method, url):
            self.req = webob.Request.blank(self._clean_url(url))
            if self.stub_force_sendfile:
                fake_sendfile = FakeSendFile(self.req)
                stubs.Set(sendfile, 'sendfile', fake_sendfile.sendfile)
            self.req.method = method

        def putheader(self, key, value):
            self.req.headers[key] = value

        def endheaders(self):
            hl = [i.lower() for i in self.req.headers.keys()]
            assert not ('content-length' in hl and
                        'transfer-encoding' in hl), \
                'Content-Length and Transfer-Encoding are mutually exclusive'

        def send(self, data):
            # send() is called during chunked-transfer encoding, and
            # data is of the form %x\r\n%s\r\n. Strip off the %x and
            # only write the actual data in tests.
            self.req.body += data.split("\r\n")[1]

        def request(self, method, url, body=None, headers=None):
            self.req = webob.Request.blank(self._clean_url(url))
            self.req.method = method
            if headers:
                self.req.headers = headers
            if body:
                self.req.body = body

        def getresponse(self):
            mapper = routes.Mapper()
            api = context.UnauthenticatedContextMiddleware(
                router.API(mapper))
            res = self.req.get_response(api)

            # httplib.Response has a read() method...fake it out
            def fake_reader():
                return res.body

            setattr(res, 'read', fake_reader)
            return res

    def fake_get_connection_type(client):
        """Returns the proper connection type."""
        DEFAULT_REGISTRY_PORT = 9191
        DEFAULT_API_PORT = 9292

        if (client.port == DEFAULT_API_PORT and
                client.host == '0.0.0.0'):
            return FakeGlanceConnection
        elif (client.port == DEFAULT_REGISTRY_PORT and
                client.host == '0.0.0.0'):
            return FakeRegistryConnection

    def fake_image_iter(self):
        for i in self.source.app_iter:
            yield i

    def fake_sendable(self, body):
        force = getattr(self, 'stub_force_sendfile', None)
        if force is None:
            return self._stub_orig_sendable(body)
        else:
            if force:
                assert glance.common.client.SENDFILE_SUPPORTED
            return force

    stubs.Set(glance.common.client.BaseClient, 'get_connection_type',
              fake_get_connection_type)
    setattr(glance.common.client.BaseClient, '_stub_orig_sendable',
            glance.common.client.BaseClient._sendable)
    stubs.Set(glance.common.client.BaseClient, '_sendable', fake_sendable)
    stubs.Set(glance.common.client.ImageBodyIterator, '__iter__',
              fake_image_iter)


def stub_out_registry_server(stubs, **kwargs):
    """
    Mocks calls to 127.0.0.1 on 9191 for testing so
    that a real Glance Registry server does not need to be up and
    running
    """

    def fake_get_connection_type(client):
        """Returns the proper connection type."""
        DEFAULT_REGISTRY_PORT = 9191

        if (client.port == DEFAULT_REGISTRY_PORT and
                client.host == '0.0.0.0'):
            return FakeRegistryConnection

    def fake_image_iter(self):
        for i in self.response.app_iter:
            yield i

    stubs.Set(glance.common.client.BaseClient, 'get_connection_type',
              fake_get_connection_type)
    stubs.Set(glance.common.client.ImageBodyIterator, '__iter__',
              fake_image_iter)
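
# Illustrative usage (added sketch, not part of the original module): how a
# test case might install the stubs above with the stubout library these
# helpers are written against. The class name and base directory are
# assumptions for the example.
import unittest

import stubout


class ExampleStubbedTestCase(unittest.TestCase):

    def setUp(self):
        self.stubs = stubout.StubOutForTesting()
        stub_out_registry_and_store_server(self.stubs, '/tmp/glance-tests')

    def tearDown(self):
        self.stubs.UnsetAll()

    def test_stubs_installed(self):
        # After setUp, BaseClient's connection plumbing points at the fakes
        # above rather than a real HTTP connection.
        self.assertTrue(hasattr(glance.common.client.BaseClient,
                                '_stub_orig_sendable'))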
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Defines interface for DB access.

The underlying driver is loaded as a :class:`LazyPluggable`.

Functions in this module are imported into the nova.db namespace. Call these
functions from the nova.db namespace, not the nova.db.api namespace.

All functions in this module return objects that implement a dictionary-like
interface. Currently, many of these objects are sqlalchemy objects that
implement a dictionary interface. However, a future goal is to have all of
these objects be simple dictionaries.


**Related Flags**

:db_backend:  string to lookup in the list of LazyPluggable backends.
              `sqlalchemy` is the only supported backend right now.

:sql_connection:  string specifying the sqlalchemy connection to use, like:
                  `sqlite:///var/lib/nova/nova.sqlite`.

:enable_new_services:  when adding a new service to the database, is it in the
                       pool of available hardware (Default: True)

"""

from nova import exception
from nova import flags
from nova.openstack.common import cfg
from nova import utils


db_opts = [
    cfg.StrOpt('db_backend',
               default='sqlalchemy',
               help='The backend to use for db'),
    cfg.BoolOpt('enable_new_services',
                default=True,
                help='Services to be added to the available pool on create'),
    cfg.StrOpt('instance_name_template',
               default='instance-%08x',
               help='Template string to be used to generate instance names'),
    cfg.StrOpt('volume_name_template',
               default='volume-%08x',
               help='Template string to be used to generate volume names'),
    cfg.StrOpt('snapshot_name_template',
               default='snapshot-%08x',
               help='Template string to be used to generate snapshot names'),
]

FLAGS = flags.FLAGS
FLAGS.register_opts(db_opts)

IMPL = utils.LazyPluggable('db_backend',
                           sqlalchemy='nova.db.sqlalchemy.api')


class NoMoreNetworks(exception.Error):
    """No more available networks."""
    pass


class NoMoreTargets(exception.Error):
    """No more available targets."""
    pass


###################


def service_destroy(context, instance_id):
    """Destroy the service or raise if it does not exist."""
    return IMPL.service_destroy(context, instance_id)


def service_get(context, service_id):
    """Get a service or raise if it does not exist."""
    return IMPL.service_get(context, service_id)


def service_get_by_host_and_topic(context, host, topic):
    """Get a service by host it's on and topic it listens to."""
    return IMPL.service_get_by_host_and_topic(context, host, topic)


def service_get_all(context, disabled=None):
    """Get all services."""
    return IMPL.service_get_all(context, disabled)


def service_get_all_by_topic(context, topic):
    """Get all services for a given topic."""
    return IMPL.service_get_all_by_topic(context, topic)


def service_get_all_by_host(context, host):
    """Get all services for a given host."""
    return IMPL.service_get_all_by_host(context, host)


def service_get_all_compute_by_host(context, host):
    """Get all compute services for a given host."""
    return IMPL.service_get_all_compute_by_host(context, host)


def service_get_all_compute_sorted(context):
    """Get all compute services sorted by instance count.

    :returns: a list of (Service, instance_count) tuples.

    """
    return IMPL.service_get_all_compute_sorted(context)


def service_get_all_volume_sorted(context):
    """Get all volume services sorted by volume count.

    :returns: a list of (Service, volume_count) tuples.

    """
    return IMPL.service_get_all_volume_sorted(context)


def service_get_by_args(context, host, binary):
    """Get the state of a service by node name and binary."""
    return IMPL.service_get_by_args(context, host, binary)


def service_create(context, values):
    """Create a service from the values dictionary."""
    return IMPL.service_create(context, values)


def service_update(context, service_id, values):
    """Set the given properties on a service and update it.

    Raises NotFound if service does not exist.

    """
    return IMPL.service_update(context, service_id, values)


###################


def compute_node_get(context, compute_id):
    """Get a compute node or raise if it does not exist."""
    return IMPL.compute_node_get(context, compute_id)


def compute_node_get_all(context):
    """Get all compute nodes."""
    return IMPL.compute_node_get_all(context)


def compute_node_create(context, values):
    """Create a compute node from the values dictionary."""
    return IMPL.compute_node_create(context, values)


def compute_node_update(context, compute_id, values, auto_adjust=True):
    """Set the given properties on a compute node and update it.

    Raises NotFound if the compute node does not exist.

    """
    return IMPL.compute_node_update(context, compute_id, values, auto_adjust)


def compute_node_get_by_host(context, host):
    return IMPL.compute_node_get_by_host(context, host)


def compute_node_utilization_update(context, host, free_ram_mb_delta=0,
                                    free_disk_gb_delta=0, work_delta=0,
                                    vm_delta=0):
    return IMPL.compute_node_utilization_update(context, host,
                                                free_ram_mb_delta,
                                                free_disk_gb_delta,
                                                work_delta, vm_delta)


def compute_node_utilization_set(context, host, free_ram_mb=None,
                                 free_disk_gb=None, work=None, vms=None):
    return IMPL.compute_node_utilization_set(context, host, free_ram_mb,
                                             free_disk_gb, work, vms)


###################


def certificate_create(context, values):
    """Create a certificate from the values dictionary."""
    return IMPL.certificate_create(context, values)


def certificate_get_all_by_project(context, project_id):
    """Get all certificates for a project."""
    return IMPL.certificate_get_all_by_project(context, project_id)


def certificate_get_all_by_user(context, user_id):
    """Get all certificates for a user."""
    return IMPL.certificate_get_all_by_user(context, user_id)


def certificate_get_all_by_user_and_project(context, user_id, project_id):
    """Get all certificates for a user and project."""
    return IMPL.certificate_get_all_by_user_and_project(context,
                                                        user_id,
                                                        project_id)


###################


def floating_ip_get(context, id):
    return IMPL.floating_ip_get(context, id)


def floating_ip_get_pools(context):
    """Returns a list of floating ip pools."""
    return IMPL.floating_ip_get_pools(context)


def floating_ip_allocate_address(context, project_id, pool):
    """Allocate a free floating ip from the specified pool and return the
    address.

    Raises if one is not available.

    """
    return IMPL.floating_ip_allocate_address(context, project_id, pool)


def floating_ip_create(context, values):
    """Create a floating ip from the values dictionary."""
    return IMPL.floating_ip_create(context, values)


def floating_ip_count_by_project(context, project_id):
    """Count floating ips used by project."""
    return IMPL.floating_ip_count_by_project(context, project_id)


def floating_ip_deallocate(context, address):
    """Deallocate a floating ip by address."""
    return IMPL.floating_ip_deallocate(context, address)


def floating_ip_destroy(context, address):
    """Destroy the floating_ip or raise if it does not exist."""
    return IMPL.floating_ip_destroy(context, address)


def floating_ip_disassociate(context, address):
    """Disassociate a floating ip from a fixed ip by address.

    :returns: the address of the existing fixed ip.

    """
    return IMPL.floating_ip_disassociate(context, address)


def floating_ip_fixed_ip_associate(context, floating_address,
                                   fixed_address, host):
    """Associate a floating ip to a fixed_ip by address."""
    return IMPL.floating_ip_fixed_ip_associate(context,
                                               floating_address,
                                               fixed_address,
                                               host)


def floating_ip_get_all(context):
    """Get all floating ips."""
    return IMPL.floating_ip_get_all(context)


def floating_ip_get_all_by_host(context, host):
    """Get all floating ips by host."""
    return IMPL.floating_ip_get_all_by_host(context, host)


def floating_ip_get_all_by_project(context, project_id):
    """Get all floating ips by project."""
    return IMPL.floating_ip_get_all_by_project(context, project_id)


def floating_ip_get_by_address(context, address):
    """Get a floating ip by address or raise if it doesn't exist."""
    return IMPL.floating_ip_get_by_address(context, address)


def floating_ip_get_by_fixed_address(context, fixed_address):
    """Get floating ips by fixed address."""
    return IMPL.floating_ip_get_by_fixed_address(context, fixed_address)


def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id):
    """Get floating ips by fixed ip id."""
    return IMPL.floating_ip_get_by_fixed_ip_id(context, fixed_ip_id)


def floating_ip_update(context, address, values):
    """Update a floating ip by address or raise if it doesn't exist."""
    return IMPL.floating_ip_update(context, address, values)


def floating_ip_set_auto_assigned(context, address):
    """Set auto_assigned flag to floating ip."""
    return IMPL.floating_ip_set_auto_assigned(context, address)


def dnsdomain_list(context):
    """Get a list of all zones in our database, public and private."""
    return IMPL.dnsdomain_list(context)


def dnsdomain_register_for_zone(context, fqdomain, zone):
    """Associate a DNS domain with an availability zone."""
    return IMPL.dnsdomain_register_for_zone(context, fqdomain, zone)


def dnsdomain_register_for_project(context, fqdomain, project):
    """Associate a DNS domain with a project id."""
    return IMPL.dnsdomain_register_for_project(context, fqdomain, project)


def dnsdomain_unregister(context, fqdomain):
    """Purge associations for the specified DNS zone."""
    return IMPL.dnsdomain_unregister(context, fqdomain)


def dnsdomain_get(context, fqdomain):
    """Get the db record for the specified domain."""
    return IMPL.dnsdomain_get(context, fqdomain)


####################


def migration_update(context, id, values):
    """Update a migration instance."""
    return IMPL.migration_update(context, id, values)


def migration_create(context, values):
    """Create a migration record."""
    return IMPL.migration_create(context, values)


def migration_get(context, migration_id):
    """Finds a migration by the id."""
    return IMPL.migration_get(context, migration_id)
def migration_get_by_instance_and_status(context, instance_uuid, status):
    """Finds a migration by the uuid of the instance it is migrating."""
    return IMPL.migration_get_by_instance_and_status(context,
                                                     instance_uuid,
                                                     status)


def migration_get_all_unconfirmed(context, confirm_window):
    """Finds all unconfirmed migrations within the confirmation window."""
    return IMPL.migration_get_all_unconfirmed(context, confirm_window)


####################


def fixed_ip_associate(context, address, instance_id, network_id=None,
                       reserved=False):
    """Associate fixed ip to instance.

    Raises if fixed ip is not available.

    """
    return IMPL.fixed_ip_associate(context, address, instance_id,
                                   network_id, reserved)


def fixed_ip_associate_pool(context, network_id, instance_id=None, host=None):
    """Find free ip in network and associate it to instance or host.

    Raises if one is not available.

    """
    return IMPL.fixed_ip_associate_pool(context, network_id,
                                        instance_id, host)


def fixed_ip_create(context, values):
    """Create a fixed ip from the values dictionary."""
    return IMPL.fixed_ip_create(context, values)


def fixed_ip_bulk_create(context, ips):
    """Create a lot of fixed ips from the values dictionary."""
    return IMPL.fixed_ip_bulk_create(context, ips)


def fixed_ip_disassociate(context, address):
    """Disassociate a fixed ip from an instance by address."""
    return IMPL.fixed_ip_disassociate(context, address)


def fixed_ip_disassociate_all_by_timeout(context, host, time):
    """Disassociate old fixed ips from host."""
    return IMPL.fixed_ip_disassociate_all_by_timeout(context, host, time)


def fixed_ip_get(context, id):
    """Get fixed ip by id or raise if it does not exist."""
    return IMPL.fixed_ip_get(context, id)


def fixed_ip_get_all(context):
    """Get all defined fixed ips."""
    return IMPL.fixed_ip_get_all(context)


def fixed_ip_get_by_address(context, address):
    """Get a fixed ip by address or raise if it does not exist."""
    return IMPL.fixed_ip_get_by_address(context, address)


def fixed_ip_get_by_instance(context, instance_id):
    """Get fixed ips by instance or raise if none exist."""
    return IMPL.fixed_ip_get_by_instance(context, instance_id)


def fixed_ip_get_by_network_host(context, network_id, host):
    """Get fixed ip for a host in a network."""
    return IMPL.fixed_ip_get_by_network_host(context, network_id, host)


def fixed_ips_by_virtual_interface(context, vif_id):
    """Get fixed ips by virtual interface or raise if none exist."""
    return IMPL.fixed_ips_by_virtual_interface(context, vif_id)


def fixed_ip_get_network(context, address):
    """Get a network for a fixed ip by address."""
    return IMPL.fixed_ip_get_network(context, address)


def fixed_ip_update(context, address, values):
    """Update a fixed ip from the values dictionary."""
    return IMPL.fixed_ip_update(context, address, values)


####################


def virtual_interface_create(context, values):
    """Create a virtual interface record in the database."""
    return IMPL.virtual_interface_create(context, values)


def virtual_interface_get(context, vif_id):
    """Gets a virtual interface from the table."""
    return IMPL.virtual_interface_get(context, vif_id)


def virtual_interface_get_by_address(context, address):
    """Gets a virtual interface from the table filtering on address."""
    return IMPL.virtual_interface_get_by_address(context, address)


def virtual_interface_get_by_uuid(context, vif_uuid):
    """Gets a virtual interface from the table filtering on vif uuid."""
    return IMPL.virtual_interface_get_by_uuid(context, vif_uuid)


def virtual_interface_get_by_instance(context, instance_id):
    """Gets all virtual_interfaces for instance."""
    return IMPL.virtual_interface_get_by_instance(context, instance_id)


def virtual_interface_get_by_instance_and_network(context, instance_id,
                                                  network_id):
    """Gets the virtual interfaces for the instance on the given network."""
    return IMPL.virtual_interface_get_by_instance_and_network(context,
                                                              instance_id,
                                                              network_id)


def virtual_interface_delete(context, vif_id):
    """Delete virtual interface record from the database."""
    return IMPL.virtual_interface_delete(context, vif_id)


def virtual_interface_delete_by_instance(context, instance_id):
    """Delete virtual interface records associated with instance."""
    return IMPL.virtual_interface_delete_by_instance(context, instance_id)


def virtual_interface_get_all(context):
    """Gets all virtual interfaces from the table."""
    return IMPL.virtual_interface_get_all(context)


####################


def instance_create(context, values):
    """Create an instance from the values dictionary."""
    return IMPL.instance_create(context, values)


def instance_data_get_for_project(context, project_id):
    """Get (instance_count, total_cores, total_ram) for project."""
    return IMPL.instance_data_get_for_project(context, project_id)


def instance_destroy(context, instance_id):
    """Destroy the instance or raise if it does not exist."""
    return IMPL.instance_destroy(context, instance_id)


def instance_get_by_uuid(context, uuid):
    """Get an instance or raise if it does not exist."""
    return IMPL.instance_get_by_uuid(context, uuid)


def instance_get(context, instance_id):
    """Get an instance or raise if it does not exist."""
    return IMPL.instance_get(context, instance_id)


def instance_get_all(context):
    """Get all instances."""
    return IMPL.instance_get_all(context)


def instance_get_all_by_filters(context, filters, sort_key='created_at',
                                sort_dir='desc'):
    """Get all instances that match all filters."""
    return IMPL.instance_get_all_by_filters(context, filters, sort_key,
                                            sort_dir)


def instance_get_active_by_window(context, begin, end=None, project_id=None):
    """Get instances active during a certain time window.

    Specifying a project_id will filter for a certain project.

    """
    return IMPL.instance_get_active_by_window(context, begin, end, project_id)


def instance_get_active_by_window_joined(context, begin, end=None,
                                         project_id=None):
    """Get instances and joins active during a certain time window.

    Specifying a project_id will filter for a certain project.

    """
    return IMPL.instance_get_active_by_window_joined(context, begin, end,
                                                     project_id)


def instance_get_all_by_project(context, project_id):
    """Get all instances belonging to a project."""
    return IMPL.instance_get_all_by_project(context, project_id)


def instance_get_all_by_host(context, host):
    """Get all instances belonging to a host."""
    return IMPL.instance_get_all_by_host(context, host)


def instance_get_all_by_reservation(context, reservation_id):
    """Get all instances belonging to a reservation."""
    return IMPL.instance_get_all_by_reservation(context, reservation_id)


def instance_get_floating_address(context, instance_id):
    """Get the first floating ip address of an instance."""
    return IMPL.instance_get_floating_address(context, instance_id)


def instance_get_all_hung_in_rebooting(context, reboot_window):
    """Get all instances stuck in a rebooting state."""
    return IMPL.instance_get_all_hung_in_rebooting(context, reboot_window)


def instance_test_and_set(context, instance_id, attr, ok_states, new_state):
    """Atomically check if an instance is in a valid state, and if it is,
    set the instance into a new state.

    """
    return IMPL.instance_test_and_set(context, instance_id, attr,
                                      ok_states, new_state)


def instance_update(context, instance_id, values):
    """Set the given properties on an instance and update it.

    Raises NotFound if instance does not exist.

    """
    return IMPL.instance_update(context, instance_id, values)


def instance_add_security_group(context, instance_id, security_group_id):
    """Associate the given security group with the given instance."""
    return IMPL.instance_add_security_group(context, instance_id,
                                            security_group_id)


def instance_remove_security_group(context, instance_id, security_group_id):
    """Disassociate the given security group from the given instance."""
    return IMPL.instance_remove_security_group(context, instance_id,
                                               security_group_id)


def instance_action_create(context, values):
    """Create an instance action from the values dictionary."""
    return IMPL.instance_action_create(context, values)


def instance_get_actions(context, instance_uuid):
    """Get instance actions by instance uuid."""
    return IMPL.instance_get_actions(context, instance_uuid)


def instance_get_id_to_uuid_mapping(context, ids):
    """Return a dictionary containing 'ID: UUID' given the ids."""
    return IMPL.instance_get_id_to_uuid_mapping(context, ids)


###################


def instance_info_cache_create(context, values):
    """Create a new instance cache record in the table.

    :param context: = request context object
    :param values: = dict containing column values

    """
    return IMPL.instance_info_cache_create(context, values)


def instance_info_cache_get(context, instance_uuid):
    """Gets an instance info cache from the table.

    :param instance_uuid: = uuid of the info cache's instance

    """
    return IMPL.instance_info_cache_get(context, instance_uuid)


def instance_info_cache_update(context, instance_uuid, values):
    """Update an instance info cache record in the table.

    :param instance_uuid: = uuid of info cache's instance
    :param values: = dict containing column values to update

    """
    return IMPL.instance_info_cache_update(context, instance_uuid, values)


def instance_info_cache_delete(context, instance_uuid):
    """Deletes an existing instance_info_cache record.

    :param instance_uuid: = uuid of the instance tied to the cache record

    """
    return IMPL.instance_info_cache_delete(context, instance_uuid)


###################


def key_pair_create(context, values):
    """Create a key_pair from the values dictionary."""
    return IMPL.key_pair_create(context, values)


def key_pair_destroy(context, user_id, name):
    """Destroy the key_pair or raise if it does not exist."""
    return IMPL.key_pair_destroy(context, user_id, name)


def key_pair_destroy_all_by_user(context, user_id):
    """Destroy all key_pairs by user."""
    return IMPL.key_pair_destroy_all_by_user(context, user_id)


def key_pair_get(context, user_id, name):
    """Get a key_pair or raise if it does not exist."""
    return IMPL.key_pair_get(context, user_id, name)


def key_pair_get_all_by_user(context, user_id):
    """Get all key_pairs by user."""
    return IMPL.key_pair_get_all_by_user(context, user_id)


####################


def network_associate(context, project_id, force=False):
    """Associate a free network to a project."""
    return IMPL.network_associate(context, project_id, force)


def network_count(context):
    """Return the number of networks."""
    return IMPL.network_count(context)


def network_count_reserved_ips(context, network_id):
    """Return the number of reserved ips in the network."""
    return IMPL.network_count_reserved_ips(context, network_id)
def network_create_safe(context, values):
    """Create a network from the values dict.

    The network is only returned if the create succeeds. If the create
    violates constraints because the network already exists, no exception
    is raised.

    """
    return IMPL.network_create_safe(context, values)


def network_delete_safe(context, network_id):
    """Delete network with key network_id.

    This method assumes that the network is not associated with any project.

    """
    return IMPL.network_delete_safe(context, network_id)


def network_create_fixed_ips(context, network_id, num_vpn_clients):
    """Create the ips for the network, reserving specified ips."""
    return IMPL.network_create_fixed_ips(context, network_id, num_vpn_clients)


def network_disassociate(context, network_id):
    """Disassociate the network from project or raise if it does not exist."""
    return IMPL.network_disassociate(context, network_id)


def network_get(context, network_id):
    """Get a network or raise if it does not exist."""
    return IMPL.network_get(context, network_id)


def network_get_all(context):
    """Return all defined networks."""
    return IMPL.network_get_all(context)


def network_get_all_by_uuids(context, network_uuids, project_id=None):
    """Return networks by ids."""
    return IMPL.network_get_all_by_uuids(context, network_uuids, project_id)


# pylint: disable=C0103
def network_get_associated_fixed_ips(context, network_id, host=None):
    """Get all of a network's ips that have been associated."""
    return IMPL.network_get_associated_fixed_ips(context, network_id, host)


def network_get_by_bridge(context, bridge):
    """Get a network by bridge or raise if it does not exist."""
    return IMPL.network_get_by_bridge(context, bridge)


def network_get_by_uuid(context, uuid):
    """Get a network by uuid or raise if it does not exist."""
    return IMPL.network_get_by_uuid(context, uuid)


def network_get_by_cidr(context, cidr):
    """Get a network by cidr or raise if it does not exist."""
    return IMPL.network_get_by_cidr(context, cidr)


def network_get_by_instance(context, instance_id):
    """Get a network by instance id or raise if it does not exist."""
    return IMPL.network_get_by_instance(context, instance_id)


def network_get_all_by_instance(context, instance_id):
    """Get all networks by instance id or raise if none exist."""
    return IMPL.network_get_all_by_instance(context, instance_id)


def network_get_all_by_host(context, host):
    """All networks for which the given host is the network host."""
    return IMPL.network_get_all_by_host(context, host)


def network_get_index(context, network_id):
    """Get non-conflicting index for network."""
    return IMPL.network_get_index(context, network_id)


def network_set_cidr(context, network_id, cidr):
    """Set the Classless Inter-Domain Routing block for the network."""
    return IMPL.network_set_cidr(context, network_id, cidr)


def network_set_host(context, network_id, host_id):
    """Safely set the host for network."""
    return IMPL.network_set_host(context, network_id, host_id)


def network_update(context, network_id, values):
    """Set the given properties on a network and update it.

    Raises NotFound if network does not exist.

    """
    return IMPL.network_update(context, network_id, values)


###################


def queue_get_for(context, topic, physical_node_id):
    """Return a channel to send a message to a node with a topic."""
    return IMPL.queue_get_for(context, topic, physical_node_id)


###################


def iscsi_target_count_by_host(context, host):
    """Return count of export devices."""
    return IMPL.iscsi_target_count_by_host(context, host)


def iscsi_target_create_safe(context, values):
    """Create an iscsi_target from the values dictionary.

    The device is not returned. If the create violates the unique
    constraints because the iscsi_target and host already exist,
    no exception is raised.

    """
    return IMPL.iscsi_target_create_safe(context, values)


###############


def auth_token_destroy(context, token_id):
    """Destroy an auth token."""
    return IMPL.auth_token_destroy(context, token_id)


def auth_token_get(context, token_hash):
    """Retrieves a token given the hash representing it."""
    return IMPL.auth_token_get(context, token_hash)


def auth_token_update(context, token_hash, values):
    """Updates a token given the hash representing it."""
    return IMPL.auth_token_update(context, token_hash, values)


def auth_token_create(context, token):
    """Creates a new token."""
    return IMPL.auth_token_create(context, token)


###################


def quota_create(context, project_id, resource, limit):
    """Create a quota for the given project and resource."""
    return IMPL.quota_create(context, project_id, resource, limit)


def quota_get(context, project_id, resource):
    """Retrieve a quota or raise if it does not exist."""
    return IMPL.quota_get(context, project_id, resource)


def quota_get_all_by_project(context, project_id):
    """Retrieve all quotas associated with a given project."""
    return IMPL.quota_get_all_by_project(context, project_id)


def quota_update(context, project_id, resource, limit):
    """Update a quota or raise if it does not exist."""
    return IMPL.quota_update(context, project_id, resource, limit)


def quota_destroy(context, project_id, resource):
    """Destroy the quota or raise if it does not exist."""
    return IMPL.quota_destroy(context, project_id, resource)


def quota_destroy_all_by_project(context, project_id):
    """Destroy all quotas associated with a given project."""
    # NOTE: the original forwarded to quota_get_all_by_project here, which
    # contradicted the docstring; corrected to call the destroy method.
    return IMPL.quota_destroy_all_by_project(context, project_id)


###################


def volume_allocate_iscsi_target(context, volume_id, host):
    """Atomically allocate a free iscsi_target from the pool."""
    return IMPL.volume_allocate_iscsi_target(context, volume_id, host)


def volume_attached(context, volume_id, instance_id, mountpoint):
    """Ensure that a volume is set as attached."""
    return IMPL.volume_attached(context, volume_id, instance_id, mountpoint)


def volume_create(context, values):
    """Create a volume from the values dictionary."""
    return IMPL.volume_create(context, values)


def volume_data_get_for_project(context, project_id):
    """Get (volume_count, gigabytes) for project."""
    return IMPL.volume_data_get_for_project(context, project_id)


def volume_destroy(context, volume_id):
    """Destroy the volume or raise if it does not exist."""
    return IMPL.volume_destroy(context, volume_id)


def volume_detached(context, volume_id):
    """Ensure that a volume is set as detached."""
    return IMPL.volume_detached(context, volume_id)


def volume_get(context, volume_id):
    """Get a volume or raise if it does not exist."""
    return IMPL.volume_get(context, volume_id)


def volume_get_all(context):
    """Get all volumes."""
    return IMPL.volume_get_all(context)


def volume_get_all_by_host(context, host):
    """Get all volumes belonging to a host."""
    return IMPL.volume_get_all_by_host(context, host)


def volume_get_all_by_instance(context, instance_id):
    """Get all volumes belonging to an instance."""
    return IMPL.volume_get_all_by_instance(context, instance_id)


def volume_get_all_by_project(context, project_id):
    """Get all volumes belonging to a project."""
    return IMPL.volume_get_all_by_project(context, project_id)


def volume_get_by_ec2_id(context, ec2_id):
    """Get a volume by ec2 id."""
    return IMPL.volume_get_by_ec2_id(context, ec2_id)


def volume_get_instance(context, volume_id):
    """Get the instance that a volume is attached to."""
    return IMPL.volume_get_instance(context, volume_id)


def volume_get_iscsi_target_num(context, volume_id):
    """Get the target num (tid) allocated to the volume."""
    return IMPL.volume_get_iscsi_target_num(context, volume_id)


def volume_update(context, volume_id, values):
    """Set the given properties on a volume and update it.

    Raises NotFound if volume does not exist.

    """
    return IMPL.volume_update(context, volume_id, values)


####################


def snapshot_create(context, values):
    """Create a snapshot from the values dictionary."""
    return IMPL.snapshot_create(context, values)


def snapshot_destroy(context, snapshot_id):
    """Destroy the snapshot or raise if it does not exist."""
    return IMPL.snapshot_destroy(context, snapshot_id)


def snapshot_get(context, snapshot_id):
    """Get a snapshot or raise if it does not exist."""
    return IMPL.snapshot_get(context, snapshot_id)


def snapshot_get_all(context):
    """Get all snapshots."""
    return IMPL.snapshot_get_all(context)


def snapshot_get_all_by_project(context, project_id):
    """Get all snapshots belonging to a project."""
    return IMPL.snapshot_get_all_by_project(context, project_id)


def snapshot_get_all_for_volume(context, volume_id):
    """Get all snapshots for a volume."""
    return IMPL.snapshot_get_all_for_volume(context, volume_id)


def snapshot_update(context, snapshot_id, values):
    """Set the given properties on a snapshot and update it.

    Raises NotFound if snapshot does not exist.

    """
    return IMPL.snapshot_update(context, snapshot_id, values)


####################


def block_device_mapping_create(context, values):
    """Create an entry of block device mapping."""
    return IMPL.block_device_mapping_create(context, values)


def block_device_mapping_update(context, bdm_id, values):
    """Update an entry of block device mapping."""
    return IMPL.block_device_mapping_update(context, bdm_id, values)
def block_device_mapping_update_or_create(context, values):
    """Update an entry of block device mapping.

    If it does not exist, create a new entry.

    """
    return IMPL.block_device_mapping_update_or_create(context, values)


def block_device_mapping_get_all_by_instance(context, instance_id):
    """Get all block device mappings belonging to an instance."""
    return IMPL.block_device_mapping_get_all_by_instance(context, instance_id)


def block_device_mapping_destroy(context, bdm_id):
    """Destroy the block device mapping."""
    return IMPL.block_device_mapping_destroy(context, bdm_id)


def block_device_mapping_destroy_by_instance_and_volume(context, instance_id,
                                                        volume_id):
    """Destroy the block device mapping or raise if it does not exist."""
    return IMPL.block_device_mapping_destroy_by_instance_and_volume(
        context, instance_id, volume_id)


####################


def security_group_get_all(context):
    """Get all security groups."""
    return IMPL.security_group_get_all(context)


def security_group_get(context, security_group_id):
    """Get security group by its id."""
    return IMPL.security_group_get(context, security_group_id)


def security_group_get_by_name(context, project_id, group_name):
    """Returns a security group with the specified name from a project."""
    return IMPL.security_group_get_by_name(context, project_id, group_name)


def security_group_get_by_project(context, project_id):
    """Get all security groups belonging to a project."""
    return IMPL.security_group_get_by_project(context, project_id)


def security_group_get_by_instance(context, instance_id):
    """Get security groups to which the instance is assigned."""
    return IMPL.security_group_get_by_instance(context, instance_id)


def security_group_exists(context, project_id, group_name):
    """Indicates if a group name exists in a project."""
    return IMPL.security_group_exists(context, project_id, group_name)


def security_group_in_use(context, group_id):
    """Indicates if a security group is currently in use."""
    return IMPL.security_group_in_use(context, group_id)


def security_group_create(context, values):
    """Create a new security group."""
    return IMPL.security_group_create(context, values)


def security_group_destroy(context, security_group_id):
    """Deletes a security group."""
    return IMPL.security_group_destroy(context, security_group_id)


def security_group_count_by_project(context, project_id):
    """Count number of security groups in a project."""
    return IMPL.security_group_count_by_project(context, project_id)


####################


def security_group_rule_create(context, values):
    """Create a new security group rule."""
    return IMPL.security_group_rule_create(context, values)


def security_group_rule_get_by_security_group(context, security_group_id):
    """Get all rules for a given security group."""
    return IMPL.security_group_rule_get_by_security_group(context,
                                                          security_group_id)


def security_group_rule_get_by_security_group_grantee(context,
                                                      security_group_id):
    """Get all rules that grant access to the given security group."""
    return IMPL.security_group_rule_get_by_security_group_grantee(
        context, security_group_id)


def security_group_rule_destroy(context, security_group_rule_id):
    """Deletes a security group rule."""
    return IMPL.security_group_rule_destroy(context, security_group_rule_id)


def security_group_rule_get(context, security_group_rule_id):
    """Gets a security group rule."""
    return IMPL.security_group_rule_get(context, security_group_rule_id)


def security_group_rule_count_by_group(context, security_group_id):
    """Count rules in a given security group."""
    return IMPL.security_group_rule_count_by_group(context,
                                                   security_group_id)


###################


def provider_fw_rule_create(context, rule):
    """Add a firewall rule at the provider level (all hosts & instances)."""
    return IMPL.provider_fw_rule_create(context, rule)


def provider_fw_rule_get_all(context):
    """Get all provider-level firewall rules."""
    return IMPL.provider_fw_rule_get_all(context)


def provider_fw_rule_destroy(context, rule_id):
    """Delete a provider firewall rule from the database."""
    return IMPL.provider_fw_rule_destroy(context, rule_id)


###################


def user_get(context, id):
    """Get user by id."""
    return IMPL.user_get(context, id)


def user_get_by_uid(context, uid):
    """Get user by uid."""
    return IMPL.user_get_by_uid(context, uid)


def user_get_by_access_key(context, access_key):
    """Get user by access key."""
    return IMPL.user_get_by_access_key(context, access_key)


def user_create(context, values):
    """Create a new user."""
    return IMPL.user_create(context, values)


def user_delete(context, id):
    """Delete a user."""
    return IMPL.user_delete(context, id)


def user_get_all(context):
    """Get all users."""
    return IMPL.user_get_all(context)


def user_add_role(context, user_id, role):
    """Add another global role for user."""
    return IMPL.user_add_role(context, user_id, role)


def user_remove_role(context, user_id, role):
    """Remove global role from user."""
    return IMPL.user_remove_role(context, user_id, role)


def user_get_roles(context, user_id):
    """Get global roles for user."""
    return IMPL.user_get_roles(context, user_id)


def user_add_project_role(context, user_id, project_id, role):
    """Add project role for user."""
    return IMPL.user_add_project_role(context, user_id, project_id, role)


def user_remove_project_role(context, user_id, project_id, role):
    """Remove project role from user."""
    return IMPL.user_remove_project_role(context, user_id, project_id, role)


def user_get_roles_for_project(context, user_id, project_id):
    """Return list of roles a user holds on project."""
    return IMPL.user_get_roles_for_project(context, user_id, project_id)


def user_update(context, user_id, values):
    """Update user."""
    return IMPL.user_update(context, user_id, values)


###################


def project_get(context, id):
    """Get project by id."""
    return IMPL.project_get(context, id)


def project_create(context, values):
    """Create a new project."""
    return IMPL.project_create(context, values)


def project_add_member(context, project_id, user_id):
    """Add user to project."""
    return IMPL.project_add_member(context, project_id, user_id)


def project_get_all(context):
    """Get all projects."""
    return IMPL.project_get_all(context)


def project_get_by_user(context, user_id):
    """Get all projects of which the given user is a member."""
    return IMPL.project_get_by_user(context, user_id)


def project_remove_member(context, project_id, user_id):
    """Remove the given user from the given project."""
    return IMPL.project_remove_member(context, project_id, user_id)


def project_update(context, project_id, values):
    """Update the given project."""
    return IMPL.project_update(context, project_id, values)


def project_delete(context, project_id):
    """Delete project."""
    return IMPL.project_delete(context, project_id)


def project_get_networks(context, project_id, associate=True):
    """Return the network associated with the project.

    If associate is true, it will attempt to associate a new
    network if one is not found, otherwise it returns None.

    """
    return IMPL.project_get_networks(context, project_id, associate)


###################


def console_pool_create(context, values):
    """Create console pool."""
    return IMPL.console_pool_create(context, values)


def console_pool_get(context, pool_id):
    """Get a console pool."""
    return IMPL.console_pool_get(context, pool_id)


def console_pool_get_by_host_type(context, compute_host, proxy_host,
                                  console_type):
    """Fetch a console pool for a given proxy host, compute host, and type."""
    return IMPL.console_pool_get_by_host_type(context, compute_host,
                                              proxy_host, console_type)


def console_pool_get_all_by_host_type(context, host, console_type):
    """Fetch all pools for given proxy host and type."""
    return IMPL.console_pool_get_all_by_host_type(context, host, console_type)


def console_create(context, values):
    """Create a console."""
    return IMPL.console_create(context, values)


def console_delete(context, console_id):
    """Delete a console."""
    return IMPL.console_delete(context, console_id)


def console_get_by_pool_instance(context, pool_id, instance_id):
    """Get console entry for a given instance and pool."""
    return IMPL.console_get_by_pool_instance(context, pool_id, instance_id)


def console_get_all_by_instance(context, instance_id):
    """Get consoles for a given instance."""
    return IMPL.console_get_all_by_instance(context, instance_id)


def console_get(context, console_id, instance_id=None):
    """Get a specific console (possibly on a given instance)."""
    return IMPL.console_get(context, console_id, instance_id)


##################


def instance_type_create(context, values):
    """Create a new instance type."""
    return IMPL.instance_type_create(context, values)


def instance_type_get_all(context, inactive=False, filters=None):
    """Get all instance types."""
    return IMPL.instance_type_get_all(context, inactive=inactive,
                                      filters=filters)


def instance_type_get(context, id):
    """Get instance type by id."""
    return IMPL.instance_type_get(context, id)


def instance_type_get_by_name(context, name):
    """Get instance type by name."""
    return IMPL.instance_type_get_by_name(context, name)


def instance_type_get_by_flavor_id(context, id):
    """Get instance type by flavor id."""
    return IMPL.instance_type_get_by_flavor_id(context, id)


def instance_type_destroy(context, name):
    """Delete an instance type."""
    return IMPL.instance_type_destroy(context, name)


####################


def cell_create(context, values):
    """Create a new child Cell entry."""
    return IMPL.cell_create(context, values)


def cell_update(context, cell_id, values):
    """Update a child Cell entry."""
    return IMPL.cell_update(context, cell_id, values)


def cell_delete(context, cell_id):
    """Delete a child Cell."""
    return IMPL.cell_delete(context, cell_id)


def cell_get(context, cell_id):
    """Get a specific child Cell."""
    return IMPL.cell_get(context, cell_id)


def cell_get_all(context):
    """Get all child Cells."""
    return IMPL.cell_get_all(context)


####################


def instance_metadata_get(context, instance_id):
    """Get all metadata for an instance."""
    return IMPL.instance_metadata_get(context, instance_id)


def instance_metadata_delete(context, instance_id, key):
    """Delete the given metadata item."""
    IMPL.instance_metadata_delete(context, instance_id, key)


def instance_metadata_update(context, instance_id, metadata, delete):
    """Update metadata if it exists, otherwise create it."""
    IMPL.instance_metadata_update(context, instance_id, metadata, delete)


####################


def agent_build_create(context, values):
    """Create a new agent build entry."""
    return IMPL.agent_build_create(context, values)
def agent_build_get_by_triple(context, hypervisor, os, architecture):
    """Get agent build by hypervisor/OS/architecture triple."""
    return IMPL.agent_build_get_by_triple(context, hypervisor, os,
                                          architecture)


def agent_build_get_all(context):
    """Get all agent builds."""
    return IMPL.agent_build_get_all(context)


def agent_build_destroy(context, agent_update_id):
    """Destroy agent build entry."""
    IMPL.agent_build_destroy(context, agent_update_id)


def agent_build_update(context, agent_build_id, values):
    """Update agent build entry."""
    IMPL.agent_build_update(context, agent_build_id, values)


####################


def bw_usage_get_by_macs(context, macs, start_period):
    """Return bw usages for an instance in a given audit period."""
    return IMPL.bw_usage_get_by_macs(context, macs, start_period)


def bw_usage_update(context, mac, start_period, bw_in, bw_out):
    """Update cached bw usage for an instance and network.

    Creates a new record if needed.

    """
    return IMPL.bw_usage_update(context, mac, start_period, bw_in, bw_out)


####################


def instance_type_extra_specs_get(context, instance_type_id):
    """Get all extra specs for an instance type."""
    return IMPL.instance_type_extra_specs_get(context, instance_type_id)


def instance_type_extra_specs_delete(context, instance_type_id, key):
    """Delete the given extra specs item."""
    IMPL.instance_type_extra_specs_delete(context, instance_type_id, key)


def instance_type_extra_specs_update_or_create(context, instance_type_id,
                                               extra_specs):
    """Create or update instance type extra specs.

    This adds or modifies the key/value pairs specified in the extra
    specs dict argument.

    """
    IMPL.instance_type_extra_specs_update_or_create(context,
                                                    instance_type_id,
                                                    extra_specs)


##################


def volume_metadata_get(context, volume_id):
    """Get all metadata for a volume."""
    return IMPL.volume_metadata_get(context, volume_id)


def volume_metadata_delete(context, volume_id, key):
    """Delete the given metadata item."""
    IMPL.volume_metadata_delete(context, volume_id, key)


def volume_metadata_update(context, volume_id, metadata, delete):
    """Update metadata if it exists, otherwise create it."""
    IMPL.volume_metadata_update(context, volume_id, metadata, delete)


##################


def volume_type_create(context, values):
    """Create a new volume type."""
    return IMPL.volume_type_create(context, values)


def volume_type_get_all(context, inactive=False):
    """Get all volume types."""
    return IMPL.volume_type_get_all(context, inactive)


def volume_type_get(context, id):
    """Get volume type by id."""
    return IMPL.volume_type_get(context, id)


def volume_type_get_by_name(context, name):
    """Get volume type by name."""
    return IMPL.volume_type_get_by_name(context, name)


def volume_type_destroy(context, name):
    """Delete a volume type."""
    return IMPL.volume_type_destroy(context, name)


####################


def volume_type_extra_specs_get(context, volume_type_id):
    """Get all extra specs for a volume type."""
    return IMPL.volume_type_extra_specs_get(context, volume_type_id)


def volume_type_extra_specs_delete(context, volume_type_id, key):
    """Delete the given extra specs item."""
    IMPL.volume_type_extra_specs_delete(context, volume_type_id, key)


def volume_type_extra_specs_update_or_create(context, volume_type_id,
                                             extra_specs):
    """Create or update volume type extra specs.

    This adds or modifies the key/value pairs specified in the extra
    specs dict argument.

    """
    IMPL.volume_type_extra_specs_update_or_create(context, volume_type_id,
                                                  extra_specs)


###################


def s3_image_get(context, image_id):
    """Find local s3 image represented by the provided id."""
    return IMPL.s3_image_get(context, image_id)


def s3_image_get_by_uuid(context, image_uuid):
    """Find local s3 image represented by the provided uuid."""
    return IMPL.s3_image_get_by_uuid(context, image_uuid)


def s3_image_create(context, image_uuid):
    """Create local s3 image represented by provided uuid."""
    return IMPL.s3_image_create(context, image_uuid)


####################


def sm_backend_conf_create(context, values):
    """Create a new SM Backend Config entry."""
    return IMPL.sm_backend_conf_create(context, values)


def sm_backend_conf_update(context, sm_backend_conf_id, values):
    """Update a SM Backend Config entry."""
    return IMPL.sm_backend_conf_update(context, sm_backend_conf_id, values)


def sm_backend_conf_delete(context, sm_backend_conf_id):
    """Delete a SM Backend Config."""
    return IMPL.sm_backend_conf_delete(context, sm_backend_conf_id)


def sm_backend_conf_get(context, sm_backend_conf_id):
    """Get a specific SM Backend Config."""
    return IMPL.sm_backend_conf_get(context, sm_backend_conf_id)


def sm_backend_conf_get_by_sr(context, sr_uuid):
    """Get a specific SM Backend Config by SR uuid."""
    return IMPL.sm_backend_conf_get_by_sr(context, sr_uuid)


def sm_backend_conf_get_all(context):
    """Get all SM Backend Configs."""
    return IMPL.sm_backend_conf_get_all(context)


####################


def sm_flavor_create(context, values):
    """Create a new SM Flavor entry."""
    return IMPL.sm_flavor_create(context, values)


def sm_flavor_update(context, sm_flavor_id, values):
    """Update a SM Flavor entry."""
    # NOTE: the original dropped sm_flavor_id when forwarding; corrected
    # to pass it through.
    return IMPL.sm_flavor_update(context, sm_flavor_id, values)


def sm_flavor_delete(context, sm_flavor_id):
    """Delete a SM Flavor."""
    return IMPL.sm_flavor_delete(context, sm_flavor_id)


def sm_flavor_get(context, sm_flavor):
    """Get a specific SM Flavor."""
    return IMPL.sm_flavor_get(context, sm_flavor)


def sm_flavor_get_all(context):
    """Get all SM Flavors."""
    return IMPL.sm_flavor_get_all(context)


####################


def sm_volume_create(context, values):
    """Create a new SM volume entry."""
    return IMPL.sm_volume_create(context, values)


def sm_volume_update(context, volume_id, values):
    """Update a SM volume entry."""
    # NOTE: the original dropped volume_id when forwarding; corrected to
    # pass it through.
    return IMPL.sm_volume_update(context, volume_id, values)


def sm_volume_delete(context, volume_id):
    """Delete a SM volume."""
    return IMPL.sm_volume_delete(context, volume_id)


def sm_volume_get(context, volume_id):
    """Get a specific SM volume."""
    return IMPL.sm_volume_get(context, volume_id)


def sm_volume_get_all(context):
    """Get all SM volumes."""
    return IMPL.sm_volume_get_all(context)


####################


def aggregate_create(context, values, metadata=None):
    """Create a new aggregate with metadata."""
    return IMPL.aggregate_create(context, values, metadata)


def aggregate_get(context, aggregate_id, read_deleted='no'):
    """Get a specific aggregate by id."""
    return IMPL.aggregate_get(context, aggregate_id, read_deleted)


def aggregate_get_by_host(context, host, read_deleted='no'):
    """Get a specific aggregate by host."""
    return IMPL.aggregate_get_by_host(context, host, read_deleted)


def aggregate_update(context, aggregate_id, values):
    """Update the attributes of an aggregate.

    If values contains a metadata key, it updates the aggregate
    metadata too.

    """
    return IMPL.aggregate_update(context, aggregate_id, values)


def aggregate_delete(context, aggregate_id):
    """Delete an aggregate."""
    return IMPL.aggregate_delete(context, aggregate_id)


def aggregate_get_all(context, read_deleted='yes'):
    """Get all aggregates."""
    return IMPL.aggregate_get_all(context, read_deleted)


def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False):
    """Add/update metadata. If set_delete=True, it adds only."""
    IMPL.aggregate_metadata_add(context, aggregate_id, metadata, set_delete)


def aggregate_metadata_get(context, aggregate_id, read_deleted='no'):
    """Get metadata for the specified aggregate."""
    return IMPL.aggregate_metadata_get(context, aggregate_id, read_deleted)


def aggregate_metadata_delete(context, aggregate_id, key):
    """Delete the given metadata key."""
    IMPL.aggregate_metadata_delete(context, aggregate_id, key)


def aggregate_host_add(context, aggregate_id, host):
    """Add host to the aggregate."""
    IMPL.aggregate_host_add(context, aggregate_id, host)


def aggregate_host_get_all(context, aggregate_id, read_deleted='yes'):
    """Get hosts for the specified aggregate."""
    return IMPL.aggregate_host_get_all(context, aggregate_id, read_deleted)


def aggregate_host_delete(context, aggregate_id, host):
    """Delete the given host from the aggregate."""
    IMPL.aggregate_host_delete(context, aggregate_id, host)


####################


def instance_fault_create(context, values):
    """Create a new Instance Fault."""
    return IMPL.instance_fault_create(context, values)


def instance_fault_get_by_instance_uuids(context, instance_uuids):
    """Get all instance faults for the provided instance_uuids."""
    return IMPL.instance_fault_get_by_instance_uuids(context, instance_uuids)
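
# Illustrative sketch (added example, not part of the original module).
# Every public function in this file forwards to IMPL, which
# utils.LazyPluggable resolves to the backend module named by the
# `db_backend` flag the first time an attribute is looked up. The
# standalone model below imitates that behaviour with importlib; the
# class name is made up for the demo.
import importlib


class _LazyBackendSketch(object):
    """Resolve a backend module lazily, on first attribute access."""

    def __init__(self, module_name):
        self._module_name = module_name
        self._backend = None

    def __getattr__(self, name):
        # Only invoked for attributes not found on the instance itself,
        # so reading _module_name/_backend here never recurses.
        if self._backend is None:
            self._backend = importlib.import_module(self._module_name)
        return getattr(self._backend, name)


# Usage sketch: _LazyBackendSketch('nova.db.sqlalchemy.api').service_get(...)
# would import the sqlalchemy backend only at this first call, which is
# exactly how IMPL defers the backend import above.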
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Main entry point into the Identity service."""

import uuid
import urllib
import urlparse

from keystone import config
from keystone import exception
from keystone import policy
from keystone import token
from keystone.common import logging
from keystone.common import manager
from keystone.common import wsgi


CONF = config.CONF
LOG = logging.getLogger(__name__)


class Manager(manager.Manager):
    """Default pivot point for the Identity backend.

    See :mod:`keystone.common.manager.Manager` for more details on how this
    dynamically calls the backend.

    """

    def __init__(self):
        super(Manager, self).__init__(CONF.identity.driver)


class Driver(object):
    """Interface description for an Identity driver."""

    def authenticate(self, user_id=None, tenant_id=None, password=None):
        """Authenticate a given user, tenant and password.

        Returns: (user, tenant, metadata).

        """
        raise exception.NotImplemented()

    def get_tenant(self, tenant_id):
        """Get a tenant by id.

        Returns: tenant_ref or None.

        """
        raise exception.NotImplemented()

    def get_tenant_by_name(self, tenant_name):
        """Get a tenant by name.

        Returns: tenant_ref or None.

        """
        raise exception.NotImplemented()

    def get_user(self, user_id):
        """Get a user by id.

        Returns: user_ref or None.

        """
        raise exception.NotImplemented()

    def get_user_by_name(self, user_name):
        """Get a user by name.

        Returns: user_ref or None.

        """
        raise exception.NotImplemented()

    def get_role(self, role_id):
        """Get a role by id.

        Returns: role_ref or None.

        """
        raise exception.NotImplemented()

    def list_users(self):
        """List all users in the system.

        NOTE(termie): I'd prefer if this listed only the users for a given
                      tenant.

        Returns: a list of user_refs or an empty list.

        """
        raise exception.NotImplemented()

    def list_roles(self):
        """List all roles in the system.

        Returns: a list of role_refs or an empty list.

        """
        raise exception.NotImplemented()

    # NOTE(termie): the seven calls below should probably be exposed by the
    #               api more clearly when the api redesign happens
    def add_user_to_tenant(self, tenant_id, user_id):
        raise exception.NotImplemented()

    def remove_user_from_tenant(self, tenant_id, user_id):
        raise exception.NotImplemented()

    def get_all_tenants(self):
        raise exception.NotImplemented()

    def get_tenants_for_user(self, user_id):
        """Get the tenants associated with a given user.

        Returns: a list of tenant ids.

        """
        raise exception.NotImplemented()

    def get_roles_for_user_and_tenant(self, user_id, tenant_id):
        """Get the roles associated with a user within a given tenant.

        Returns: a list of role ids.

        """
        raise exception.NotImplemented()

    def add_role_to_user_and_tenant(self, user_id, tenant_id, role_id):
        """Add a role to a user within a given tenant."""
        raise exception.NotImplemented()

    def remove_role_from_user_and_tenant(self, user_id, tenant_id, role_id):
        """Remove a role from a user within a given tenant."""
        raise exception.NotImplemented()

    # user crud
    def create_user(self, user_id, user):
        raise exception.NotImplemented()

    def update_user(self, user_id, user):
        raise exception.NotImplemented()

    def delete_user(self, user_id):
        raise exception.NotImplemented()

    # tenant crud
    def create_tenant(self, tenant_id, tenant):
        raise exception.NotImplemented()

    def update_tenant(self, tenant_id, tenant):
        raise exception.NotImplemented()

    def delete_tenant(self, tenant_id, tenant):
        raise exception.NotImplemented()

    # metadata crud
    def get_metadata(self, user_id, tenant_id):
        raise exception.NotImplemented()

    def create_metadata(self, user_id, tenant_id, metadata):
        raise exception.NotImplemented()

    def update_metadata(self, user_id, tenant_id, metadata):
        raise exception.NotImplemented()

    def delete_metadata(self, user_id, tenant_id, metadata):
        raise exception.NotImplemented()

    # role crud
    def create_role(self, role_id, role):
        raise exception.NotImplemented()

    def update_role(self, role_id, role):
        raise exception.NotImplemented()

    def delete_role(self, role_id):
        raise exception.NotImplemented()


class PublicRouter(wsgi.ComposableRouter):
    def add_routes(self, mapper):
        tenant_controller = TenantController()
        mapper.connect('/tenants',
                       controller=tenant_controller,
                       action='get_tenants_for_token',
                       conditions=dict(methods=['GET']))


class AdminRouter(wsgi.ComposableRouter):
    def add_routes(self, mapper):
        # Tenant Operations
        tenant_controller = TenantController()
        mapper.connect('/tenants',
                       controller=tenant_controller,
                       action='get_all_tenants',
                       conditions=dict(method=['GET']))
        mapper.connect('/tenants/{tenant_id}',
                       controller=tenant_controller,
                       action='get_tenant',
                       conditions=dict(method=['GET']))

        # User Operations
        user_controller = UserController()
        mapper.connect('/users/{user_id}',
                       controller=user_controller,
                       action='get_user',
                       conditions=dict(method=['GET']))

        # Role Operations
        roles_controller = RoleController()
        mapper.connect('/tenants/{tenant_id}/users/{user_id}/roles',
                       controller=roles_controller,
                       action='get_user_roles',
                       conditions=dict(method=['GET']))
        mapper.connect('/users/{user_id}/roles',
                       controller=user_controller,
                       action='get_user_roles',
                       conditions=dict(method=['GET']))


class TenantController(wsgi.Application):
    def __init__(self):
        self.identity_api = Manager()
        self.policy_api = policy.Manager()
        self.token_api = token.Manager()
        super(TenantController, self).__init__()

    def get_all_tenants(self, context, **kw):
        """Gets a list of all tenants for an admin user."""
        self.assert_admin(context)
        tenant_refs = self.identity_api.get_tenants(context)
        params = {
            'limit': context['query_string'].get('limit'),
            'marker': context['query_string'].get('marker'),
        }
        return self._format_tenant_list(tenant_refs, **params)
""" try: token_ref = self.token_api.get_token(context=context, token_id=context['token_id']) except exception.NotFound: raise exception.Unauthorized() user_ref = token_ref['user'] tenant_ids = self.identity_api.get_tenants_for_user( context, user_ref['id']) tenant_refs = [] for tenant_id in tenant_ids: tenant_refs.append(self.identity_api.get_tenant( context=context, tenant_id=tenant_id)) params = { 'limit': context['query_string'].get('limit'), 'marker': context['query_string'].get('marker'), } return self._format_tenant_list(tenant_refs, **params) def get_tenant(self, context, tenant_id): # TODO(termie): this stuff should probably be moved to middleware self.assert_admin(context) tenant = self.identity_api.get_tenant(context, tenant_id) if tenant is None: raise exception.TenantNotFound(tenant_id=tenant_id) return {'tenant': tenant} # CRUD Extension def create_tenant(self, context, tenant): tenant_ref = self._normalize_dict(tenant) self.assert_admin(context) tenant_id = (tenant_ref.get('id') and tenant_ref.get('id') or uuid.uuid4().hex) tenant_ref['id'] = tenant_id tenant = self.identity_api.create_tenant( context, tenant_id, tenant_ref) return {'tenant': tenant} def update_tenant(self, context, tenant_id, tenant): self.assert_admin(context) if self.identity_api.get_tenant(context, tenant_id) is None: raise exception.TenantNotFound(tenant_id=tenant_id) tenant_ref = self.identity_api.update_tenant( context, tenant_id, tenant) return {'tenant': tenant_ref} def delete_tenant(self, context, tenant_id, **kw): self.assert_admin(context) if self.identity_api.get_tenant(context, tenant_id) is None: raise exception.TenantNotFound(tenant_id=tenant_id) self.identity_api.delete_tenant(context, tenant_id) def get_tenant_users(self, context, tenant_id, **kw): self.assert_admin(context) if self.identity_api.get_tenant(context, tenant_id) is None: raise exception.TenantNotFound(tenant_id=tenant_id) user_refs = self.identity_api.get_tenant_users(context, tenant_id) return {'users': user_refs} def _format_tenant_list(self, tenant_refs, **kwargs): marker = kwargs.get('marker') page_idx = 0 if marker is not None: for (marker_idx, tenant) in enumerate(tenant_refs): if tenant['id'] == marker: # we start pagination after the marker page_idx = marker_idx + 1 break else: msg = 'Marker could not be found' raise exception.ValidationError(message=msg) limit = kwargs.get('limit') if limit is not None: try: limit = int(limit) if limit < 0: raise AssertionError() except (ValueError, AssertionError): msg = 'Invalid limit value' raise exception.ValidationError(message=msg) tenant_refs = tenant_refs[page_idx:limit] for x in tenant_refs: if 'enabled' not in x: x['enabled'] = True o = {'tenants': tenant_refs, 'tenants_links': []} return o class UserController(wsgi.Application): def __init__(self): self.identity_api = Manager() self.policy_api = policy.Manager() self.token_api = token.Manager() super(UserController, self).__init__() def get_user(self, context, user_id): self.assert_admin(context) user_ref = self.identity_api.get_user(context, user_id) if not user_ref: raise exception.UserNotFound(user_id=user_id) return {'user': user_ref} def get_users(self, context): # NOTE(termie): i can't imagine that this really wants all the data # about every single user in the system... 
self.assert_admin(context) user_refs = self.identity_api.list_users(context) return {'users': user_refs} # CRUD extension def create_user(self, context, user): user = self._normalize_dict(user) self.assert_admin(context) tenant_id = user.get('tenantId', None) if (tenant_id is not None and self.identity_api.get_tenant(context, tenant_id) is None): raise exception.TenantNotFound(tenant_id=tenant_id) user_id = uuid.uuid4().hex user_ref = user.copy() user_ref['id'] = user_id new_user_ref = self.identity_api.create_user( context, user_id, user_ref) if tenant_id: self.identity_api.add_user_to_tenant(context, tenant_id, user_id) return {'user': new_user_ref} def update_user(self, context, user_id, user): # NOTE(termie): this is really more of a patch than a put self.assert_admin(context) if self.identity_api.get_user(context, user_id) is None: raise exception.UserNotFound(user_id=user_id) user_ref = self.identity_api.update_user(context, user_id, user) # If the password was changed or the user was disabled we clear tokens if user.get('password') or user.get('enabled', True) == False: try: for token_id in self.token_api.list_tokens(context, user_id): self.token_api.delete_token(context, token_id) except exception.NotImplemented: # The users status has been changed but tokens remain valid for # backends that can't list tokens for users LOG.warning('User %s status has changed, but existing tokens ' 'remain valid' % user_id) return {'user': user_ref} def delete_user(self, context, user_id): self.assert_admin(context) if self.identity_api.get_user(context, user_id) is None: raise exception.UserNotFound(user_id=user_id) self.identity_api.delete_user(context, user_id) def set_user_enabled(self, context, user_id, user): return self.update_user(context, user_id, user) def set_user_password(self, context, user_id, user): return self.update_user(context, user_id, user) def update_user_tenant(self, context, user_id, user): """Update the default tenant.""" self.assert_admin(context) # ensure that we're a member of that tenant tenant_id = user.get('tenantId') self.identity_api.add_user_to_tenant(context, tenant_id, user_id) return self.update_user(context, user_id, user) class RoleController(wsgi.Application): def __init__(self): self.identity_api = Manager() self.token_api = token.Manager() self.policy_api = policy.Manager() super(RoleController, self).__init__() # COMPAT(essex-3) def get_user_roles(self, context, user_id, tenant_id=None): """Get the roles for a user and tenant pair. Since we're trying to ignore the idea of user-only roles we're not implementing them in hopes that the idea will die off. 
""" self.assert_admin(context) if tenant_id is None: raise exception.NotImplemented(message='User roles not supported: ' 'tenant ID required') user = self.identity_api.get_user(context, user_id) if user is None: raise exception.UserNotFound(user_id=user_id) tenant = self.identity_api.get_tenant(context, tenant_id) if tenant is None: raise exception.TenantNotFound(tenant_id=tenant_id) roles = self.identity_api.get_roles_for_user_and_tenant( context, user_id, tenant_id) return {'roles': [self.identity_api.get_role(context, x) for x in roles]} # CRUD extension def get_role(self, context, role_id): self.assert_admin(context) role_ref = self.identity_api.get_role(context, role_id) if not role_ref: raise exception.RoleNotFound(role_id=role_id) return {'role': role_ref} def create_role(self, context, role): role = self._normalize_dict(role) self.assert_admin(context) role_id = uuid.uuid4().hex role['id'] = role_id role_ref = self.identity_api.create_role(context, role_id, role) return {'role': role_ref} def delete_role(self, context, role_id): self.assert_admin(context) self.get_role(context, role_id) self.identity_api.delete_role(context, role_id) def get_roles(self, context): self.assert_admin(context) roles = self.identity_api.list_roles(context) # TODO(termie): probably inefficient at some point return {'roles': roles} def add_role_to_user(self, context, user_id, role_id, tenant_id=None): """Add a role to a user and tenant pair. Since we're trying to ignore the idea of user-only roles we're not implementing them in hopes that the idea will die off. """ self.assert_admin(context) if tenant_id is None: raise exception.NotImplemented(message='User roles not supported: ' 'tenant_id required') if self.identity_api.get_user(context, user_id) is None: raise exception.UserNotFound(user_id=user_id) if self.identity_api.get_tenant(context, tenant_id) is None: raise exception.TenantNotFound(tenant_id=tenant_id) if self.identity_api.get_role(context, role_id) is None: raise exception.RoleNotFound(role_id=role_id) # This still has the weird legacy semantics that adding a role to # a user also adds them to a tenant self.identity_api.add_user_to_tenant(context, tenant_id, user_id) self.identity_api.add_role_to_user_and_tenant( context, user_id, tenant_id, role_id) role_ref = self.identity_api.get_role(context, role_id) return {'role': role_ref} def remove_role_from_user(self, context, user_id, role_id, tenant_id=None): """Remove a role from a user and tenant pair. Since we're trying to ignore the idea of user-only roles we're not implementing them in hopes that the idea will die off. 
""" self.assert_admin(context) if tenant_id is None: raise exception.NotImplemented(message='User roles not supported: ' 'tenant_id required') if self.identity_api.get_user(context, user_id) is None: raise exception.UserNotFound(user_id=user_id) if self.identity_api.get_tenant(context, tenant_id) is None: raise exception.TenantNotFound(tenant_id=tenant_id) if self.identity_api.get_role(context, role_id) is None: raise exception.RoleNotFound(role_id=role_id) # This still has the weird legacy semantics that adding a role to # a user also adds them to a tenant, so we must follow up on that self.identity_api.remove_role_from_user_and_tenant( context, user_id, tenant_id, role_id) roles = self.identity_api.get_roles_for_user_and_tenant( context, user_id, tenant_id) if not roles: self.identity_api.remove_user_from_tenant( context, tenant_id, user_id) return # COMPAT(diablo): CRUD extension def get_role_refs(self, context, user_id): """Ultimate hack to get around having to make role_refs first-class. This will basically iterate over the various roles the user has in all tenants the user is a member of and create fake role_refs where the id encodes the user-tenant-role information so we can look up the appropriate data when we need to delete them. """ self.assert_admin(context) user_ref = self.identity_api.get_user(context, user_id) tenant_ids = self.identity_api.get_tenants_for_user(context, user_id) o = [] for tenant_id in tenant_ids: role_ids = self.identity_api.get_roles_for_user_and_tenant( context, user_id, tenant_id) for role_id in role_ids: ref = {'roleId': role_id, 'tenantId': tenant_id, 'userId': user_id} ref['id'] = urllib.urlencode(ref) o.append(ref) return {'roles': o} # COMPAT(diablo): CRUD extension def create_role_ref(self, context, user_id, role): """This is actually used for adding a user to a tenant. In the legacy data model adding a user to a tenant required setting a role. """ self.assert_admin(context) # TODO(termie): for now we're ignoring the actual role tenant_id = role.get('tenantId') role_id = role.get('roleId') self.identity_api.add_user_to_tenant(context, tenant_id, user_id) self.identity_api.add_role_to_user_and_tenant( context, user_id, tenant_id, role_id) role_ref = self.identity_api.get_role(context, role_id) return {'role': role_ref} # COMPAT(diablo): CRUD extension def delete_role_ref(self, context, user_id, role_ref_id): """This is actually used for deleting a user from a tenant. In the legacy data model removing a user from a tenant required deleting a role. To emulate this, we encode the tenant and role in the role_ref_id, and if this happens to be the last role for the user-tenant pair, we remove the user from the tenant. """ self.assert_admin(context) # TODO(termie): for now we're ignoring the actual role role_ref_ref = urlparse.parse_qs(role_ref_id) tenant_id = role_ref_ref.get('tenantId')[0] role_id = role_ref_ref.get('roleId')[0] self.identity_api.remove_role_from_user_and_tenant( context, user_id, tenant_id, role_id) roles = self.identity_api.get_roles_for_user_and_tenant( context, user_id, tenant_id) if not roles: self.identity_api.remove_user_from_tenant( context, tenant_id, user_id)
./CrossVul/dataset_final_sorted/CWE-264/py/good_3724_0
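The COMPAT role_ref handlers in the file above smuggle a (user, tenant, role) triple through a single string id by URL-encoding it. Below is a minimal sketch of that round trip, using the same Python 2 urllib/urlparse calls the file imports; the literal ids are made up for illustration.

import urllib
import urlparse

# Encode a grant the way get_role_refs builds its fake role_ref ids.
ref = {'roleId': 'r-123', 'tenantId': 't-456', 'userId': 'u-789'}
ref_id = urllib.urlencode(ref)  # e.g. 'roleId=r-123&tenantId=t-456&userId=u-789'

# Decode it the way delete_role_ref does; parse_qs returns a list per key,
# which is why the controller indexes [0] to recover each scalar value.
decoded = urlparse.parse_qs(ref_id)
assert decoded['tenantId'][0] == 't-456'
assert decoded['roleId'][0] == 'r-123'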
crossvul-python_data_bad_3633_1
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import webob from nova import db from nova import exception from nova import quota from nova.api.openstack import extensions class QuotaSetsController(object): def _format_quota_set(self, project_id, quota_set): """Convert the quota object to a result dict""" return {'quota_set': { 'id': str(project_id), 'metadata_items': quota_set['metadata_items'], 'injected_file_content_bytes': quota_set['injected_file_content_bytes'], 'volumes': quota_set['volumes'], 'gigabytes': quota_set['gigabytes'], 'ram': quota_set['ram'], 'floating_ips': quota_set['floating_ips'], 'instances': quota_set['instances'], 'injected_files': quota_set['injected_files'], 'cores': quota_set['cores'], }} def show(self, req, id): context = req.environ['nova.context'] try: db.sqlalchemy.api.authorize_project_context(context, id) return self._format_quota_set(id, quota.get_project_quotas(context, id)) except exception.NotAuthorized: return webob.Response(status_int=403) def update(self, req, id, body): context = req.environ['nova.context'] project_id = id resources = ['metadata_items', 'injected_file_content_bytes', 'volumes', 'gigabytes', 'ram', 'floating_ips', 'instances', 'injected_files', 'cores'] for key in body['quota_set'].keys(): if key in resources: value = int(body['quota_set'][key]) try: db.quota_update(context, project_id, key, value) except exception.ProjectQuotaNotFound: db.quota_create(context, project_id, key, value) except exception.AdminRequired: return webob.Response(status_int=403) return {'quota_set': quota.get_project_quotas(context, project_id)} def defaults(self, req, id): return self._format_quota_set(id, quota._get_default_quotas()) class Quotas(extensions.ExtensionDescriptor): def get_name(self): return "Quotas" def get_alias(self): return "os-quota-sets" def get_description(self): return "Quotas management support" def get_namespace(self): return "http://docs.openstack.org/ext/quotas-sets/api/v1.1" def get_updated(self): return "2011-08-08T00:00:00+00:00" def get_resources(self): resources = [] res = extensions.ResourceExtension('os-quota-sets', QuotaSetsController(), member_actions={'defaults': 'GET'}) resources.append(res) return resources
./CrossVul/dataset_final_sorted/CWE-264/py/bad_3633_1
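The update method in the file above applies an update-or-create pattern per quota key: try db.quota_update first and create the row when ProjectQuotaNotFound is raised. A self-contained sketch of that control flow follows, with in-memory stubs standing in for the nova.db calls; the *_stub names are illustrative, not part of nova.

# In-memory stand-in for the quotas table: (project_id, resource) -> limit.
quotas = {}

class ProjectQuotaNotFound(Exception):
    pass

def quota_update_stub(project_id, resource, limit):
    if (project_id, resource) not in quotas:
        raise ProjectQuotaNotFound()
    quotas[(project_id, resource)] = limit

def quota_create_stub(project_id, resource, limit):
    quotas[(project_id, resource)] = limit

def update_quota_set(project_id, body, resources):
    for key in body['quota_set'].keys():
        if key in resources:
            # int() mirrors the controller's coercion; bad input raises ValueError.
            value = int(body['quota_set'][key])
            try:
                quota_update_stub(project_id, key, value)
            except ProjectQuotaNotFound:
                quota_create_stub(project_id, key, value)

update_quota_set('p1', {'quota_set': {'instances': '20'}}, ['instances'])
assert quotas[('p1', 'instances')] == 20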
crossvul-python_data_bad_3633_3
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Defines interface for DB access. The underlying driver is loaded as a :class:`LazyPluggable`. **Related Flags** :db_backend: string to look up in the list of LazyPluggable backends. `sqlalchemy` is the only supported backend right now. :sql_connection: string specifying the sqlalchemy connection to use, like: `sqlite:///var/lib/nova/nova.sqlite`. :enable_new_services: when adding a new service to the database, is it in the pool of available hardware (Default: True) """ from nova import exception from nova import flags from nova import utils FLAGS = flags.FLAGS flags.DEFINE_string('db_backend', 'sqlalchemy', 'The backend to use for db') flags.DEFINE_boolean('enable_new_services', True, 'Services to be added to the available pool on create') flags.DEFINE_string('instance_name_template', 'instance-%08x', 'Template string to be used to generate instance names') flags.DEFINE_string('volume_name_template', 'volume-%08x', 'Template string to be used to generate volume names') flags.DEFINE_string('snapshot_name_template', 'snapshot-%08x', 'Template string to be used to generate snapshot names') flags.DEFINE_string('vsa_name_template', 'vsa-%08x', 'Template string to be used to generate VSA names') IMPL = utils.LazyPluggable(FLAGS['db_backend'], sqlalchemy='nova.db.sqlalchemy.api') class NoMoreBlades(exception.Error): """No more available blades.""" pass class NoMoreNetworks(exception.Error): """No more available networks.""" pass class NoMoreTargets(exception.Error): """No more available targets.""" pass ################### def service_destroy(context, service_id): """Destroy the service or raise if it does not exist.""" return IMPL.service_destroy(context, service_id) def service_get(context, service_id): """Get a service or raise if it does not exist.""" return IMPL.service_get(context, service_id) def service_get_by_host_and_topic(context, host, topic): """Get a service by host it's on and topic it listens to.""" return IMPL.service_get_by_host_and_topic(context, host, topic) def service_get_all(context, disabled=None): """Get all services.""" return IMPL.service_get_all(context, disabled) def service_get_all_by_topic(context, topic): """Get all services for a given topic.""" return IMPL.service_get_all_by_topic(context, topic) def service_get_all_by_host(context, host): """Get all services for a given host.""" return IMPL.service_get_all_by_host(context, host) def service_get_all_compute_by_host(context, host): """Get all compute services for a given host.""" return IMPL.service_get_all_compute_by_host(context, host) def service_get_all_compute_sorted(context): """Get all compute services sorted by instance count. :returns: a list of (Service, instance_count) tuples. 
""" return IMPL.service_get_all_compute_sorted(context) def service_get_all_network_sorted(context): """Get all network services sorted by network count. :returns: a list of (Service, network_count) tuples. """ return IMPL.service_get_all_network_sorted(context) def service_get_all_volume_sorted(context): """Get all volume services sorted by volume count. :returns: a list of (Service, volume_count) tuples. """ return IMPL.service_get_all_volume_sorted(context) def service_get_by_args(context, host, binary): """Get the state of an service by node name and binary.""" return IMPL.service_get_by_args(context, host, binary) def service_create(context, values): """Create a service from the values dictionary.""" return IMPL.service_create(context, values) def service_update(context, service_id, values): """Set the given properties on an service and update it. Raises NotFound if service does not exist. """ return IMPL.service_update(context, service_id, values) ################### def compute_node_get(context, compute_id, session=None): """Get an computeNode or raise if it does not exist.""" return IMPL.compute_node_get(context, compute_id) def compute_node_create(context, values): """Create a computeNode from the values dictionary.""" return IMPL.compute_node_create(context, values) def compute_node_update(context, compute_id, values): """Set the given properties on an computeNode and update it. Raises NotFound if computeNode does not exist. """ return IMPL.compute_node_update(context, compute_id, values) ################### def certificate_create(context, values): """Create a certificate from the values dictionary.""" return IMPL.certificate_create(context, values) def certificate_destroy(context, certificate_id): """Destroy the certificate or raise if it does not exist.""" return IMPL.certificate_destroy(context, certificate_id) def certificate_get_all_by_project(context, project_id): """Get all certificates for a project.""" return IMPL.certificate_get_all_by_project(context, project_id) def certificate_get_all_by_user(context, user_id): """Get all certificates for a user.""" return IMPL.certificate_get_all_by_user(context, user_id) def certificate_get_all_by_user_and_project(context, user_id, project_id): """Get all certificates for a user and project.""" return IMPL.certificate_get_all_by_user_and_project(context, user_id, project_id) def certificate_update(context, certificate_id, values): """Set the given properties on an certificate and update it. Raises NotFound if service does not exist. """ return IMPL.certificate_update(context, certificate_id, values) ################### def floating_ip_get(context, id): return IMPL.floating_ip_get(context, id) def floating_ip_allocate_address(context, project_id): """Allocate free floating ip and return the address. Raises if one is not available. 
""" return IMPL.floating_ip_allocate_address(context, project_id) def floating_ip_create(context, values): """Create a floating ip from the values dictionary.""" return IMPL.floating_ip_create(context, values) def floating_ip_count_by_project(context, project_id): """Count floating ips used by project.""" return IMPL.floating_ip_count_by_project(context, project_id) def floating_ip_deallocate(context, address): """Deallocate an floating ip by address.""" return IMPL.floating_ip_deallocate(context, address) def floating_ip_destroy(context, address): """Destroy the floating_ip or raise if it does not exist.""" return IMPL.floating_ip_destroy(context, address) def floating_ip_disassociate(context, address): """Disassociate an floating ip from a fixed ip by address. :returns: the address of the existing fixed ip. """ return IMPL.floating_ip_disassociate(context, address) def floating_ip_fixed_ip_associate(context, floating_address, fixed_address, host): """Associate an floating ip to a fixed_ip by address.""" return IMPL.floating_ip_fixed_ip_associate(context, floating_address, fixed_address, host) def floating_ip_get_all(context): """Get all floating ips.""" return IMPL.floating_ip_get_all(context) def floating_ip_get_all_by_host(context, host): """Get all floating ips by host.""" return IMPL.floating_ip_get_all_by_host(context, host) def floating_ip_get_all_by_project(context, project_id): """Get all floating ips by project.""" return IMPL.floating_ip_get_all_by_project(context, project_id) def floating_ip_get_by_address(context, address): """Get a floating ip by address or raise if it doesn't exist.""" return IMPL.floating_ip_get_by_address(context, address) def floating_ip_update(context, address, values): """Update a floating ip by address or raise if it doesn't exist.""" return IMPL.floating_ip_update(context, address, values) def floating_ip_set_auto_assigned(context, address): """Set auto_assigned flag to floating ip""" return IMPL.floating_ip_set_auto_assigned(context, address) #################### def migration_update(context, id, values): """Update a migration instance.""" return IMPL.migration_update(context, id, values) def migration_create(context, values): """Create a migration record.""" return IMPL.migration_create(context, values) def migration_get(context, migration_id): """Finds a migration by the id.""" return IMPL.migration_get(context, migration_id) def migration_get_by_instance_and_status(context, instance_uuid, status): """Finds a migration by the instance uuid its migrating.""" return IMPL.migration_get_by_instance_and_status(context, instance_uuid, status) #################### def fixed_ip_associate(context, address, instance_id, network_id=None, reserved=False): """Associate fixed ip to instance. Raises if fixed ip is not available. """ return IMPL.fixed_ip_associate(context, address, instance_id, network_id, reserved) def fixed_ip_associate_pool(context, network_id, instance_id=None, host=None): """Find free ip in network and associate it to instance or host. Raises if one is not available. 
""" return IMPL.fixed_ip_associate_pool(context, network_id, instance_id, host) def fixed_ip_create(context, values): """Create a fixed ip from the values dictionary.""" return IMPL.fixed_ip_create(context, values) def fixed_ip_bulk_create(context, ips): """Create a lot of fixed ips from the values dictionary.""" return IMPL.fixed_ip_bulk_create(context, ips) def fixed_ip_disassociate(context, address): """Disassociate a fixed ip from an instance by address.""" return IMPL.fixed_ip_disassociate(context, address) def fixed_ip_disassociate_all_by_timeout(context, host, time): """Disassociate old fixed ips from host.""" return IMPL.fixed_ip_disassociate_all_by_timeout(context, host, time) def fixed_ip_get_all(context): """Get all defined fixed ips.""" return IMPL.fixed_ip_get_all(context) def fixed_ip_get_all_by_instance_host(context, host): """Get all allocated fixed ips filtered by instance host.""" return IMPL.fixed_ip_get_all_by_instance_host(context, host) def fixed_ip_get_by_address(context, address): """Get a fixed ip by address or raise if it does not exist.""" return IMPL.fixed_ip_get_by_address(context, address) def fixed_ip_get_by_instance(context, instance_id): """Get fixed ips by instance or raise if none exist.""" return IMPL.fixed_ip_get_by_instance(context, instance_id) def fixed_ip_get_by_network_host(context, network_id, host): """Get fixed ip for a host in a network.""" return IMPL.fixed_ip_get_by_network_host(context, network_id, host) def fixed_ip_get_by_virtual_interface(context, vif_id): """Get fixed ips by virtual interface or raise if none exist.""" return IMPL.fixed_ip_get_by_virtual_interface(context, vif_id) def fixed_ip_get_network(context, address): """Get a network for a fixed ip by address.""" return IMPL.fixed_ip_get_network(context, address) def fixed_ip_update(context, address, values): """Create a fixed ip from the values dictionary.""" return IMPL.fixed_ip_update(context, address, values) #################### def virtual_interface_create(context, values): """Create a virtual interface record in the database.""" return IMPL.virtual_interface_create(context, values) def virtual_interface_update(context, vif_id, values): """Update a virtual interface record in the database.""" return IMPL.virtual_interface_update(context, vif_id, values) def virtual_interface_get(context, vif_id): """Gets a virtual interface from the table,""" return IMPL.virtual_interface_get(context, vif_id) def virtual_interface_get_by_address(context, address): """Gets a virtual interface from the table filtering on address.""" return IMPL.virtual_interface_get_by_address(context, address) def virtual_interface_get_by_uuid(context, vif_uuid): """Gets a virtual interface from the table filtering on vif uuid.""" return IMPL.virtual_interface_get_by_uuid(context, vif_uuid) def virtual_interface_get_by_fixed_ip(context, fixed_ip_id): """Gets the virtual interface fixed_ip is associated with.""" return IMPL.virtual_interface_get_by_fixed_ip(context, fixed_ip_id) def virtual_interface_get_by_instance(context, instance_id): """Gets all virtual_interfaces for instance.""" return IMPL.virtual_interface_get_by_instance(context, instance_id) def virtual_interface_get_by_instance_and_network(context, instance_id, network_id): """Gets all virtual interfaces for instance.""" return IMPL.virtual_interface_get_by_instance_and_network(context, instance_id, network_id) def virtual_interface_get_by_network(context, network_id): """Gets all virtual interfaces on network.""" return 
IMPL.virtual_interface_get_by_network(context, network_id) def virtual_interface_delete(context, vif_id): """Delete virtual interface record from the database.""" return IMPL.virtual_interface_delete(context, vif_id) def virtual_interface_delete_by_instance(context, instance_id): """Delete virtual interface records associated with instance.""" return IMPL.virtual_interface_delete_by_instance(context, instance_id) #################### def instance_create(context, values): """Create an instance from the values dictionary.""" return IMPL.instance_create(context, values) def instance_data_get_for_project(context, project_id): """Get (instance_count, total_cores, total_ram) for project.""" return IMPL.instance_data_get_for_project(context, project_id) def instance_destroy(context, instance_id): """Destroy the instance or raise if it does not exist.""" return IMPL.instance_destroy(context, instance_id) def instance_stop(context, instance_id): """Stop the instance or raise if it does not exist.""" return IMPL.instance_stop(context, instance_id) def instance_get_by_uuid(context, uuid): """Get an instance or raise if it does not exist.""" return IMPL.instance_get_by_uuid(context, uuid) def instance_get(context, instance_id): """Get an instance or raise if it does not exist.""" return IMPL.instance_get(context, instance_id) def instance_get_all(context): """Get all instances.""" return IMPL.instance_get_all(context) def instance_get_all_by_filters(context, filters): """Get all instances that match all filters.""" return IMPL.instance_get_all_by_filters(context, filters) def instance_get_active_by_window(context, begin, end=None, project_id=None): """Get instances active during a certain time window. Specifying a project_id will filter for a certain project.""" return IMPL.instance_get_active_by_window(context, begin, end, project_id) def instance_get_active_by_window_joined(context, begin, end=None, project_id=None): """Get instances and joins active during a certain time window. 
Specifying a project_id will filter for a certain project.""" return IMPL.instance_get_active_by_window_joined(context, begin, end, project_id) def instance_get_all_by_user(context, user_id): """Get all instances.""" return IMPL.instance_get_all_by_user(context, user_id) def instance_get_all_by_project(context, project_id): """Get all instance belonging to a project.""" return IMPL.instance_get_all_by_project(context, project_id) def instance_get_all_by_host(context, host): """Get all instance belonging to a host.""" return IMPL.instance_get_all_by_host(context, host) def instance_get_all_by_reservation(context, reservation_id): """Get all instances belonging to a reservation.""" return IMPL.instance_get_all_by_reservation(context, reservation_id) def instance_get_by_fixed_ip(context, address): """Get an instance for a fixed ip by address.""" return IMPL.instance_get_by_fixed_ip(context, address) def instance_get_by_fixed_ipv6(context, address): """Get an instance for a fixed ip by IPv6 address.""" return IMPL.instance_get_by_fixed_ipv6(context, address) def instance_get_fixed_addresses(context, instance_id): """Get the fixed ip address of an instance.""" return IMPL.instance_get_fixed_addresses(context, instance_id) def instance_get_fixed_addresses_v6(context, instance_id): return IMPL.instance_get_fixed_addresses_v6(context, instance_id) def instance_get_floating_address(context, instance_id): """Get the first floating ip address of an instance.""" return IMPL.instance_get_floating_address(context, instance_id) def instance_get_project_vpn(context, project_id): """Get a vpn instance by project or return None.""" return IMPL.instance_get_project_vpn(context, project_id) def instance_set_state(context, instance_id, state, description=None): """Set the state of an instance.""" return IMPL.instance_set_state(context, instance_id, state, description) def instance_update(context, instance_id, values): """Set the given properties on an instance and update it. Raises NotFound if instance does not exist. 
""" return IMPL.instance_update(context, instance_id, values) def instance_add_security_group(context, instance_id, security_group_id): """Associate the given security group with the given instance.""" return IMPL.instance_add_security_group(context, instance_id, security_group_id) def instance_remove_security_group(context, instance_id, security_group_id): """Disassociate the given security group from the given instance.""" return IMPL.instance_remove_security_group(context, instance_id, security_group_id) def instance_action_create(context, values): """Create an instance action from the values dictionary.""" return IMPL.instance_action_create(context, values) def instance_get_actions(context, instance_id): """Get instance actions by instance id.""" return IMPL.instance_get_actions(context, instance_id) ################### def key_pair_create(context, values): """Create a key_pair from the values dictionary.""" return IMPL.key_pair_create(context, values) def key_pair_destroy(context, user_id, name): """Destroy the key_pair or raise if it does not exist.""" return IMPL.key_pair_destroy(context, user_id, name) def key_pair_destroy_all_by_user(context, user_id): """Destroy all key_pairs by user.""" return IMPL.key_pair_destroy_all_by_user(context, user_id) def key_pair_get(context, user_id, name): """Get a key_pair or raise if it does not exist.""" return IMPL.key_pair_get(context, user_id, name) def key_pair_get_all_by_user(context, user_id): """Get all key_pairs by user.""" return IMPL.key_pair_get_all_by_user(context, user_id) #################### def network_associate(context, project_id, force=False): """Associate a free network to a project.""" return IMPL.network_associate(context, project_id, force) def network_count(context): """Return the number of networks.""" return IMPL.network_count(context) def network_count_allocated_ips(context, network_id): """Return the number of allocated non-reserved ips in the network.""" return IMPL.network_count_allocated_ips(context, network_id) def network_count_available_ips(context, network_id): """Return the number of available ips in the network.""" return IMPL.network_count_available_ips(context, network_id) def network_count_reserved_ips(context, network_id): """Return the number of reserved ips in the network.""" return IMPL.network_count_reserved_ips(context, network_id) def network_create_safe(context, values): """Create a network from the values dict. The network is only returned if the create succeeds. If the create violates constraints because the network already exists, no exception is raised. """ return IMPL.network_create_safe(context, values) def network_delete_safe(context, network_id): """Delete network with key network_id. 
This method assumes that the network is not associated with any project. """ return IMPL.network_delete_safe(context, network_id) def network_create_fixed_ips(context, network_id, num_vpn_clients): """Create the ips for the network, reserving specified ips.""" return IMPL.network_create_fixed_ips(context, network_id, num_vpn_clients) def network_disassociate(context, network_id): """Disassociate the network from project or raise if it does not exist.""" return IMPL.network_disassociate(context, network_id) def network_disassociate_all(context): """Disassociate all networks from projects.""" return IMPL.network_disassociate_all(context) def network_get(context, network_id): """Get a network or raise if it does not exist.""" return IMPL.network_get(context, network_id) def network_get_all(context): """Return all defined networks.""" return IMPL.network_get_all(context) def network_get_all_by_uuids(context, network_uuids, project_id=None): """Return networks by ids.""" return IMPL.network_get_all_by_uuids(context, network_uuids, project_id) # pylint: disable=C0103 def network_get_associated_fixed_ips(context, network_id): """Get all network's ips that have been associated.""" return IMPL.network_get_associated_fixed_ips(context, network_id) def network_get_by_bridge(context, bridge): """Get a network by bridge or raise if it does not exist.""" return IMPL.network_get_by_bridge(context, bridge) def network_get_by_uuid(context, uuid): """Get a network by uuid or raise if it does not exist.""" return IMPL.network_get_by_uuid(context, uuid) def network_get_by_cidr(context, cidr): """Get a network by cidr or raise if it does not exist.""" return IMPL.network_get_by_cidr(context, cidr) def network_get_by_instance(context, instance_id): """Get a network by instance id or raise if it does not exist.""" return IMPL.network_get_by_instance(context, instance_id) def network_get_all_by_instance(context, instance_id): """Get all networks by instance id or raise if none exist.""" return IMPL.network_get_all_by_instance(context, instance_id) def network_get_all_by_host(context, host): """All networks for which the given host is the network host.""" return IMPL.network_get_all_by_host(context, host) def network_get_index(context, network_id): """Get non-conflicting index for network.""" return IMPL.network_get_index(context, network_id) def network_get_vpn_ip(context, network_id): """Get the vpn ip for the network.""" return IMPL.network_get_vpn_ip(context, network_id) def network_set_cidr(context, network_id, cidr): """Set the Classless Inter-Domain Routing (CIDR) for the network.""" return IMPL.network_set_cidr(context, network_id, cidr) def network_set_host(context, network_id, host_id): """Safely set the host for network.""" return IMPL.network_set_host(context, network_id, host_id) def network_update(context, network_id, values): """Set the given properties on a network and update it. Raises NotFound if network does not exist. """ return IMPL.network_update(context, network_id, values) ################### def queue_get_for(context, topic, physical_node_id): """Return a channel to send a message to a node with a topic.""" return IMPL.queue_get_for(context, topic, physical_node_id) ################### def export_device_count(context): """Return count of export devices.""" return IMPL.export_device_count(context) def export_device_create_safe(context, values): """Create an export_device from the values dictionary. The device is not returned. 
If the create violates the unique constraints because the shelf_id and blade_id already exist, no exception is raised. """ return IMPL.export_device_create_safe(context, values) ################### def iscsi_target_count_by_host(context, host): """Return count of iscsi targets for the given host.""" return IMPL.iscsi_target_count_by_host(context, host) def iscsi_target_create_safe(context, values): """Create an iscsi_target from the values dictionary. The device is not returned. If the create violates the unique constraints because the iscsi_target and host already exist, no exception is raised. """ return IMPL.iscsi_target_create_safe(context, values) ############### def auth_token_destroy(context, token_id): """Destroy an auth token.""" return IMPL.auth_token_destroy(context, token_id) def auth_token_get(context, token_hash): """Retrieves a token given the hash representing it.""" return IMPL.auth_token_get(context, token_hash) def auth_token_update(context, token_hash, values): """Updates a token given the hash representing it.""" return IMPL.auth_token_update(context, token_hash, values) def auth_token_create(context, token): """Creates a new token.""" return IMPL.auth_token_create(context, token) ################### def quota_create(context, project_id, resource, limit): """Create a quota for the given project and resource.""" return IMPL.quota_create(context, project_id, resource, limit) def quota_get(context, project_id, resource): """Retrieve a quota or raise if it does not exist.""" return IMPL.quota_get(context, project_id, resource) def quota_get_all_by_project(context, project_id): """Retrieve all quotas associated with a given project.""" return IMPL.quota_get_all_by_project(context, project_id) def quota_update(context, project_id, resource, limit): """Update a quota or raise if it does not exist.""" return IMPL.quota_update(context, project_id, resource, limit) def quota_destroy(context, project_id, resource): """Destroy the quota or raise if it does not exist.""" return IMPL.quota_destroy(context, project_id, resource) def quota_destroy_all_by_project(context, project_id): """Destroy all quotas associated with a given project.""" return IMPL.quota_destroy_all_by_project(context, project_id) ################### def volume_allocate_shelf_and_blade(context, volume_id): """Atomically allocate a free shelf and blade from the pool.""" return IMPL.volume_allocate_shelf_and_blade(context, volume_id) def volume_allocate_iscsi_target(context, volume_id, host): """Atomically allocate a free iscsi_target from the pool.""" return IMPL.volume_allocate_iscsi_target(context, volume_id, host) def volume_attached(context, volume_id, instance_id, mountpoint): """Ensure that a volume is set as attached.""" return IMPL.volume_attached(context, volume_id, instance_id, mountpoint) def volume_create(context, values): """Create a volume from the values dictionary.""" return IMPL.volume_create(context, values) def volume_data_get_for_project(context, project_id): """Get (volume_count, gigabytes) for project.""" return IMPL.volume_data_get_for_project(context, project_id) def volume_destroy(context, volume_id): """Destroy the volume or raise if it does not exist.""" return IMPL.volume_destroy(context, volume_id) def volume_detached(context, volume_id): """Ensure that a volume is set as detached.""" return IMPL.volume_detached(context, volume_id) def volume_get(context, volume_id): """Get a volume or raise if it does not exist.""" return IMPL.volume_get(context, volume_id) def volume_get_all(context): """Get all 
volumes.""" return IMPL.volume_get_all(context) def volume_get_all_by_host(context, host): """Get all volumes belonging to a host.""" return IMPL.volume_get_all_by_host(context, host) def volume_get_all_by_instance(context, instance_id): """Get all volumes belonging to a instance.""" return IMPL.volume_get_all_by_instance(context, instance_id) def volume_get_all_by_project(context, project_id): """Get all volumes belonging to a project.""" return IMPL.volume_get_all_by_project(context, project_id) def volume_get_by_ec2_id(context, ec2_id): """Get a volume by ec2 id.""" return IMPL.volume_get_by_ec2_id(context, ec2_id) def volume_get_instance(context, volume_id): """Get the instance that a volume is attached to.""" return IMPL.volume_get_instance(context, volume_id) def volume_get_shelf_and_blade(context, volume_id): """Get the shelf and blade allocated to the volume.""" return IMPL.volume_get_shelf_and_blade(context, volume_id) def volume_get_iscsi_target_num(context, volume_id): """Get the target num (tid) allocated to the volume.""" return IMPL.volume_get_iscsi_target_num(context, volume_id) def volume_update(context, volume_id, values): """Set the given properties on an volume and update it. Raises NotFound if volume does not exist. """ return IMPL.volume_update(context, volume_id, values) #################### def snapshot_create(context, values): """Create a snapshot from the values dictionary.""" return IMPL.snapshot_create(context, values) def snapshot_destroy(context, snapshot_id): """Destroy the snapshot or raise if it does not exist.""" return IMPL.snapshot_destroy(context, snapshot_id) def snapshot_get(context, snapshot_id): """Get a snapshot or raise if it does not exist.""" return IMPL.snapshot_get(context, snapshot_id) def snapshot_get_all(context): """Get all snapshots.""" return IMPL.snapshot_get_all(context) def snapshot_get_all_by_project(context, project_id): """Get all snapshots belonging to a project.""" return IMPL.snapshot_get_all_by_project(context, project_id) def snapshot_update(context, snapshot_id, values): """Set the given properties on an snapshot and update it. Raises NotFound if snapshot does not exist. """ return IMPL.snapshot_update(context, snapshot_id, values) #################### def block_device_mapping_create(context, values): """Create an entry of block device mapping""" return IMPL.block_device_mapping_create(context, values) def block_device_mapping_update(context, bdm_id, values): """Update an entry of block device mapping""" return IMPL.block_device_mapping_update(context, bdm_id, values) def block_device_mapping_update_or_create(context, values): """Update an entry of block device mapping. 
If it does not exist, create a new entry.""" return IMPL.block_device_mapping_update_or_create(context, values) def block_device_mapping_get_all_by_instance(context, instance_id): """Get all block device mappings belonging to an instance.""" return IMPL.block_device_mapping_get_all_by_instance(context, instance_id) def block_device_mapping_destroy(context, bdm_id): """Destroy the block device mapping.""" return IMPL.block_device_mapping_destroy(context, bdm_id) def block_device_mapping_destroy_by_instance_and_volume(context, instance_id, volume_id): """Destroy the block device mapping or raise if it does not exist.""" return IMPL.block_device_mapping_destroy_by_instance_and_volume( context, instance_id, volume_id) #################### def security_group_get_all(context): """Get all security groups.""" return IMPL.security_group_get_all(context) def security_group_get(context, security_group_id): """Get security group by its id.""" return IMPL.security_group_get(context, security_group_id) def security_group_get_by_name(context, project_id, group_name): """Returns a security group with the specified name from a project.""" return IMPL.security_group_get_by_name(context, project_id, group_name) def security_group_get_by_project(context, project_id): """Get all security groups belonging to a project.""" return IMPL.security_group_get_by_project(context, project_id) def security_group_get_by_instance(context, instance_id): """Get security groups to which the instance is assigned.""" return IMPL.security_group_get_by_instance(context, instance_id) def security_group_exists(context, project_id, group_name): """Indicates if a group name exists in a project.""" return IMPL.security_group_exists(context, project_id, group_name) def security_group_create(context, values): """Create a new security group.""" return IMPL.security_group_create(context, values) def security_group_destroy(context, security_group_id): """Deletes a security group.""" return IMPL.security_group_destroy(context, security_group_id) def security_group_destroy_all(context): """Deletes all security groups.""" return IMPL.security_group_destroy_all(context) #################### def security_group_rule_create(context, values): """Create a new security group rule.""" return IMPL.security_group_rule_create(context, values) def security_group_rule_get_by_security_group(context, security_group_id): """Get all rules for a given security group.""" return IMPL.security_group_rule_get_by_security_group(context, security_group_id) def security_group_rule_get_by_security_group_grantee(context, security_group_id): """Get all rules that grant access to the given security group.""" return IMPL.security_group_rule_get_by_security_group_grantee(context, security_group_id) def security_group_rule_destroy(context, security_group_rule_id): """Deletes a security group rule.""" return IMPL.security_group_rule_destroy(context, security_group_rule_id) def security_group_rule_get(context, security_group_rule_id): """Gets a security group rule.""" return IMPL.security_group_rule_get(context, security_group_rule_id) ################### def provider_fw_rule_create(context, rule): """Add a firewall rule at the provider level (all hosts & instances).""" return IMPL.provider_fw_rule_create(context, rule) def provider_fw_rule_get_all(context): """Get all provider-level firewall rules.""" return IMPL.provider_fw_rule_get_all(context) def provider_fw_rule_get_all_by_cidr(context, cidr): """Get all provider-level firewall rules for the given cidr.""" return IMPL.provider_fw_rule_get_all_by_cidr(context, cidr)
def provider_fw_rule_destroy(context, rule_id): """Delete a provider firewall rule from the database.""" return IMPL.provider_fw_rule_destroy(context, rule_id) ################### def user_get(context, id): """Get user by id.""" return IMPL.user_get(context, id) def user_get_by_uid(context, uid): """Get user by uid.""" return IMPL.user_get_by_uid(context, uid) def user_get_by_access_key(context, access_key): """Get user by access key.""" return IMPL.user_get_by_access_key(context, access_key) def user_create(context, values): """Create a new user.""" return IMPL.user_create(context, values) def user_delete(context, id): """Delete a user.""" return IMPL.user_delete(context, id) def user_get_all(context): """Get all users.""" return IMPL.user_get_all(context) def user_add_role(context, user_id, role): """Add another global role for user.""" return IMPL.user_add_role(context, user_id, role) def user_remove_role(context, user_id, role): """Remove global role from user.""" return IMPL.user_remove_role(context, user_id, role) def user_get_roles(context, user_id): """Get global roles for user.""" return IMPL.user_get_roles(context, user_id) def user_add_project_role(context, user_id, project_id, role): """Add project role for user.""" return IMPL.user_add_project_role(context, user_id, project_id, role) def user_remove_project_role(context, user_id, project_id, role): """Remove project role from user.""" return IMPL.user_remove_project_role(context, user_id, project_id, role) def user_get_roles_for_project(context, user_id, project_id): """Return list of roles a user holds on project.""" return IMPL.user_get_roles_for_project(context, user_id, project_id) def user_update(context, user_id, values): """Update user.""" return IMPL.user_update(context, user_id, values) ################### def project_get(context, id): """Get project by id.""" return IMPL.project_get(context, id) def project_create(context, values): """Create a new project.""" return IMPL.project_create(context, values) def project_add_member(context, project_id, user_id): """Add user to project.""" return IMPL.project_add_member(context, project_id, user_id) def project_get_all(context): """Get all projects.""" return IMPL.project_get_all(context) def project_get_by_user(context, user_id): """Get all projects of which the given user is a member.""" return IMPL.project_get_by_user(context, user_id) def project_remove_member(context, project_id, user_id): """Remove the given user from the given project.""" return IMPL.project_remove_member(context, project_id, user_id) def project_update(context, project_id, values): """Update the given project.""" return IMPL.project_update(context, project_id, values) def project_delete(context, project_id): """Delete project.""" return IMPL.project_delete(context, project_id) def project_get_networks(context, project_id, associate=True): """Return the network associated with the project. If associate is true, it will attempt to associate a new network if one is not found, otherwise it returns None. 
""" return IMPL.project_get_networks(context, project_id, associate) def project_get_networks_v6(context, project_id): return IMPL.project_get_networks_v6(context, project_id) ################### def console_pool_create(context, values): """Create console pool.""" return IMPL.console_pool_create(context, values) def console_pool_get(context, pool_id): """Get a console pool.""" return IMPL.console_pool_get(context, pool_id) def console_pool_get_by_host_type(context, compute_host, proxy_host, console_type): """Fetch a console pool for a given proxy host, compute host, and type.""" return IMPL.console_pool_get_by_host_type(context, compute_host, proxy_host, console_type) def console_pool_get_all_by_host_type(context, host, console_type): """Fetch all pools for given proxy host and type.""" return IMPL.console_pool_get_all_by_host_type(context, host, console_type) def console_create(context, values): """Create a console.""" return IMPL.console_create(context, values) def console_delete(context, console_id): """Delete a console.""" return IMPL.console_delete(context, console_id) def console_get_by_pool_instance(context, pool_id, instance_id): """Get console entry for a given instance and pool.""" return IMPL.console_get_by_pool_instance(context, pool_id, instance_id) def console_get_all_by_instance(context, instance_id): """Get consoles for a given instance.""" return IMPL.console_get_all_by_instance(context, instance_id) def console_get(context, console_id, instance_id=None): """Get a specific console (possibly on a given instance).""" return IMPL.console_get(context, console_id, instance_id) ################## def instance_type_create(context, values): """Create a new instance type.""" return IMPL.instance_type_create(context, values) def instance_type_get_all(context, inactive=False): """Get all instance types.""" return IMPL.instance_type_get_all(context, inactive) def instance_type_get(context, id): """Get instance type by id.""" return IMPL.instance_type_get(context, id) def instance_type_get_by_name(context, name): """Get instance type by name.""" return IMPL.instance_type_get_by_name(context, name) def instance_type_get_by_flavor_id(context, id): """Get instance type by name.""" return IMPL.instance_type_get_by_flavor_id(context, id) def instance_type_destroy(context, name): """Delete a instance type.""" return IMPL.instance_type_destroy(context, name) def instance_type_purge(context, name): """Purges (removes) an instance type from DB. 
Use instance_type_destroy for most cases """ return IMPL.instance_type_purge(context, name) #################### def zone_create(context, values): """Create a new child Zone entry.""" return IMPL.zone_create(context, values) def zone_update(context, zone_id, values): """Update a child Zone entry.""" return IMPL.zone_update(context, zone_id, values) def zone_delete(context, zone_id): """Delete a child Zone.""" return IMPL.zone_delete(context, zone_id) def zone_get(context, zone_id): """Get a specific child Zone.""" return IMPL.zone_get(context, zone_id) def zone_get_all(context): """Get all child Zones.""" return IMPL.zone_get_all(context) #################### def instance_metadata_get(context, instance_id): """Get all metadata for an instance.""" return IMPL.instance_metadata_get(context, instance_id) def instance_metadata_delete(context, instance_id, key): """Delete the given metadata item.""" IMPL.instance_metadata_delete(context, instance_id, key) def instance_metadata_update(context, instance_id, metadata, delete): """Update metadata if it exists, otherwise create it.""" IMPL.instance_metadata_update(context, instance_id, metadata, delete) #################### def agent_build_create(context, values): """Create a new agent build entry.""" return IMPL.agent_build_create(context, values) def agent_build_get_by_triple(context, hypervisor, os, architecture): """Get agent build by hypervisor/OS/architecture triple.""" return IMPL.agent_build_get_by_triple(context, hypervisor, os, architecture) def agent_build_get_all(context): """Get all agent builds.""" return IMPL.agent_build_get_all(context) def agent_build_destroy(context, agent_update_id): """Destroy agent build entry.""" IMPL.agent_build_destroy(context, agent_update_id) def agent_build_update(context, agent_build_id, values): """Update agent build entry.""" IMPL.agent_build_update(context, agent_build_id, values) #################### def instance_type_extra_specs_get(context, instance_type_id): """Get all extra specs for an instance type.""" return IMPL.instance_type_extra_specs_get(context, instance_type_id) def instance_type_extra_specs_delete(context, instance_type_id, key): """Delete the given extra specs item.""" IMPL.instance_type_extra_specs_delete(context, instance_type_id, key) def instance_type_extra_specs_update_or_create(context, instance_type_id, extra_specs): """Create or update instance type extra specs. 
This adds or modifies the key/value pairs specified in the extra specs dict argument""" IMPL.instance_type_extra_specs_update_or_create(context, instance_type_id, extra_specs) ################## def volume_metadata_get(context, volume_id): """Get all metadata for a volume.""" return IMPL.volume_metadata_get(context, volume_id) def volume_metadata_delete(context, volume_id, key): """Delete the given metadata item.""" IMPL.volume_metadata_delete(context, volume_id, key) def volume_metadata_update(context, volume_id, metadata, delete): """Update metadata if it exists, otherwise create it.""" IMPL.volume_metadata_update(context, volume_id, metadata, delete) ################## def volume_type_create(context, values): """Create a new volume type.""" return IMPL.volume_type_create(context, values) def volume_type_get_all(context, inactive=False): """Get all volume types.""" return IMPL.volume_type_get_all(context, inactive) def volume_type_get(context, id): """Get volume type by id.""" return IMPL.volume_type_get(context, id) def volume_type_get_by_name(context, name): """Get volume type by name.""" return IMPL.volume_type_get_by_name(context, name) def volume_type_destroy(context, name): """Delete a volume type.""" return IMPL.volume_type_destroy(context, name) def volume_type_purge(context, name): """Purges (removes) a volume type from DB. Use volume_type_destroy for most cases """ return IMPL.volume_type_purge(context, name) #################### def volume_type_extra_specs_get(context, volume_type_id): """Get all extra specs for a volume type.""" return IMPL.volume_type_extra_specs_get(context, volume_type_id) def volume_type_extra_specs_delete(context, volume_type_id, key): """Delete the given extra specs item.""" IMPL.volume_type_extra_specs_delete(context, volume_type_id, key) def volume_type_extra_specs_update_or_create(context, volume_type_id, extra_specs): """Create or update volume type extra specs. This adds or modifies the key/value pairs specified in the extra specs dict argument""" IMPL.volume_type_extra_specs_update_or_create(context, volume_type_id, extra_specs) #################### def vsa_create(context, values): """Creates Virtual Storage Array record.""" return IMPL.vsa_create(context, values) def vsa_update(context, vsa_id, values): """Updates Virtual Storage Array record.""" return IMPL.vsa_update(context, vsa_id, values) def vsa_destroy(context, vsa_id): """Deletes Virtual Storage Array record.""" return IMPL.vsa_destroy(context, vsa_id) def vsa_get(context, vsa_id): """Get Virtual Storage Array record by ID.""" return IMPL.vsa_get(context, vsa_id) def vsa_get_all(context): """Get all Virtual Storage Array records.""" return IMPL.vsa_get_all(context) def vsa_get_all_by_project(context, project_id): """Get all Virtual Storage Array records by project ID.""" return IMPL.vsa_get_all_by_project(context, project_id)
./CrossVul/dataset_final_sorted/CWE-264/py/bad_3633_3
crossvul-python_data_bad_3694_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Main entry point into the Identity service.""" import uuid import urllib import urlparse from keystone import config from keystone import exception from keystone import policy from keystone import token from keystone.common import logging from keystone.common import manager from keystone.common import wsgi CONF = config.CONF LOG = logging.getLogger(__name__) class Manager(manager.Manager): """Default pivot point for the Identity backend. See :mod:`keystone.common.manager.Manager` for more details on how this dynamically calls the backend. """ def __init__(self): super(Manager, self).__init__(CONF.identity.driver) class Driver(object): """Interface description for an Identity driver.""" def authenticate(self, user_id=None, tenant_id=None, password=None): """Authenticate a given user, tenant and password. Returns: (user, tenant, metadata). """ raise exception.NotImplemented() def get_tenant(self, tenant_id): """Get a tenant by id. Returns: tenant_ref or None. """ raise exception.NotImplemented() def get_tenant_by_name(self, tenant_name): """Get a tenant by name. Returns: tenant_ref or None. """ raise exception.NotImplemented() def get_user(self, user_id): """Get a user by id. Returns: user_ref or None. """ raise exception.NotImplemented() def get_user_by_name(self, user_name): """Get a user by name. Returns: user_ref or None. """ raise exception.NotImplemented() def get_role(self, role_id): """Get a role by id. Returns: role_ref or None. """ raise exception.NotImplemented() def list_users(self): """List all users in the system. NOTE(termie): I'd prefer if this listed only the users for a given tenant. Returns: a list of user_refs or an empty list. """ raise exception.NotImplemented() def list_roles(self): """List all roles in the system. Returns: a list of role_refs or an empty list. """ raise exception.NotImplemented() # NOTE(termie): seven calls below should probably be exposed by the api # more clearly when the api redesign happens def add_user_to_tenant(self, tenant_id, user_id): raise exception.NotImplemented() def remove_user_from_tenant(self, tenant_id, user_id): raise exception.NotImplemented() def get_all_tenants(self): raise exception.NotImplemented() def get_tenants_for_user(self, user_id): """Get the tenants associated with a given user. Returns: a list of tenant ids. """ raise exception.NotImplemented() def get_roles_for_user_and_tenant(self, user_id, tenant_id): """Get the roles associated with a user within given tenant. Returns: a list of role ids. 
""" raise exception.NotImplemented() def add_role_to_user_and_tenant(self, user_id, tenant_id, role_id): """Add a role to a user within given tenant.""" raise exception.NotImplemented() def remove_role_from_user_and_tenant(self, user_id, tenant_id, role_id): """Remove a role from a user within given tenant.""" raise exception.NotImplemented() # user crud def create_user(self, user_id, user): raise exception.NotImplemented() def update_user(self, user_id, user): raise exception.NotImplemented() def delete_user(self, user_id): raise exception.NotImplemented() # tenant crud def create_tenant(self, tenant_id, tenant): raise exception.NotImplemented() def update_tenant(self, tenant_id, tenant): raise exception.NotImplemented() def delete_tenant(self, tenant_id, tenant): raise exception.NotImplemented() # metadata crud def get_metadata(self, user_id, tenant_id): raise exception.NotImplemented() def create_metadata(self, user_id, tenant_id, metadata): raise exception.NotImplemented() def update_metadata(self, user_id, tenant_id, metadata): raise exception.NotImplemented() def delete_metadata(self, user_id, tenant_id, metadata): raise exception.NotImplemented() # role crud def create_role(self, role_id, role): raise exception.NotImplemented() def update_role(self, role_id, role): raise exception.NotImplemented() def delete_role(self, role_id): raise exception.NotImplemented() class PublicRouter(wsgi.ComposableRouter): def add_routes(self, mapper): tenant_controller = TenantController() mapper.connect('/tenants', controller=tenant_controller, action='get_tenants_for_token', conditions=dict(methods=['GET'])) class AdminRouter(wsgi.ComposableRouter): def add_routes(self, mapper): # Tenant Operations tenant_controller = TenantController() mapper.connect('/tenants', controller=tenant_controller, action='get_all_tenants', conditions=dict(method=['GET'])) mapper.connect('/tenants/{tenant_id}', controller=tenant_controller, action='get_tenant', conditions=dict(method=['GET'])) # User Operations user_controller = UserController() mapper.connect('/users/{user_id}', controller=user_controller, action='get_user', conditions=dict(method=['GET'])) # Role Operations roles_controller = RoleController() mapper.connect('/tenants/{tenant_id}/users/{user_id}/roles', controller=roles_controller, action='get_user_roles', conditions=dict(method=['GET'])) mapper.connect('/users/{user_id}/roles', controller=user_controller, action='get_user_roles', conditions=dict(method=['GET'])) class TenantController(wsgi.Application): def __init__(self): self.identity_api = Manager() self.policy_api = policy.Manager() self.token_api = token.Manager() super(TenantController, self).__init__() def get_all_tenants(self, context, **kw): """Gets a list of all tenants for an admin user.""" self.assert_admin(context) tenant_refs = self.identity_api.get_tenants(context) params = { 'limit': context['query_string'].get('limit'), 'marker': context['query_string'].get('marker'), } return self._format_tenant_list(tenant_refs, **params) def get_tenants_for_token(self, context, **kw): """Get valid tenants for token based on token used to authenticate. Pulls the token from the context, validates it and gets the valid tenants for the user in the token. Doesn't care about token scopedness. 
""" try: token_ref = self.token_api.get_token(context=context, token_id=context['token_id']) except exception.NotFound: raise exception.Unauthorized() user_ref = token_ref['user'] tenant_ids = self.identity_api.get_tenants_for_user( context, user_ref['id']) tenant_refs = [] for tenant_id in tenant_ids: tenant_refs.append(self.identity_api.get_tenant( context=context, tenant_id=tenant_id)) params = { 'limit': context['query_string'].get('limit'), 'marker': context['query_string'].get('marker'), } return self._format_tenant_list(tenant_refs, **params) def get_tenant(self, context, tenant_id): # TODO(termie): this stuff should probably be moved to middleware self.assert_admin(context) tenant = self.identity_api.get_tenant(context, tenant_id) if tenant is None: raise exception.TenantNotFound(tenant_id=tenant_id) return {'tenant': tenant} # CRUD Extension def create_tenant(self, context, tenant): tenant_ref = self._normalize_dict(tenant) self.assert_admin(context) tenant_id = (tenant_ref.get('id') and tenant_ref.get('id') or uuid.uuid4().hex) tenant_ref['id'] = tenant_id tenant = self.identity_api.create_tenant( context, tenant_id, tenant_ref) return {'tenant': tenant} def update_tenant(self, context, tenant_id, tenant): self.assert_admin(context) if self.identity_api.get_tenant(context, tenant_id) is None: raise exception.TenantNotFound(tenant_id=tenant_id) tenant_ref = self.identity_api.update_tenant( context, tenant_id, tenant) return {'tenant': tenant_ref} def delete_tenant(self, context, tenant_id, **kw): self.assert_admin(context) if self.identity_api.get_tenant(context, tenant_id) is None: raise exception.TenantNotFound(tenant_id=tenant_id) self.identity_api.delete_tenant(context, tenant_id) def get_tenant_users(self, context, tenant_id, **kw): self.assert_admin(context) if self.identity_api.get_tenant(context, tenant_id) is None: raise exception.TenantNotFound(tenant_id=tenant_id) user_refs = self.identity_api.get_tenant_users(context, tenant_id) return {'users': user_refs} def _format_tenant_list(self, tenant_refs, **kwargs): marker = kwargs.get('marker') page_idx = 0 if marker is not None: for (marker_idx, tenant) in enumerate(tenant_refs): if tenant['id'] == marker: # we start pagination after the marker page_idx = marker_idx + 1 break else: msg = 'Marker could not be found' raise exception.ValidationError(message=msg) limit = kwargs.get('limit') if limit is not None: try: limit = int(limit) if limit < 0: raise AssertionError() except (ValueError, AssertionError): msg = 'Invalid limit value' raise exception.ValidationError(message=msg) tenant_refs = tenant_refs[page_idx:limit] for x in tenant_refs: if 'enabled' not in x: x['enabled'] = True o = {'tenants': tenant_refs, 'tenants_links': []} return o class UserController(wsgi.Application): def __init__(self): self.identity_api = Manager() self.policy_api = policy.Manager() self.token_api = token.Manager() super(UserController, self).__init__() def get_user(self, context, user_id): self.assert_admin(context) user_ref = self.identity_api.get_user(context, user_id) if not user_ref: raise exception.UserNotFound(user_id=user_id) return {'user': user_ref} def get_users(self, context): # NOTE(termie): i can't imagine that this really wants all the data # about every single user in the system... 
self.assert_admin(context) user_refs = self.identity_api.list_users(context) return {'users': user_refs} # CRUD extension def create_user(self, context, user): user = self._normalize_dict(user) self.assert_admin(context) tenant_id = user.get('tenantId', None) if (tenant_id is not None and self.identity_api.get_tenant(context, tenant_id) is None): raise exception.TenantNotFound(tenant_id=tenant_id) user_id = uuid.uuid4().hex user_ref = user.copy() user_ref['id'] = user_id new_user_ref = self.identity_api.create_user( context, user_id, user_ref) if tenant_id: self.identity_api.add_user_to_tenant(context, tenant_id, user_id) return {'user': new_user_ref} def update_user(self, context, user_id, user): # NOTE(termie): this is really more of a patch than a put self.assert_admin(context) if self.identity_api.get_user(context, user_id) is None: raise exception.UserNotFound(user_id=user_id) user_ref = self.identity_api.update_user(context, user_id, user) return {'user': user_ref} def delete_user(self, context, user_id): self.assert_admin(context) if self.identity_api.get_user(context, user_id) is None: raise exception.UserNotFound(user_id=user_id) self.identity_api.delete_user(context, user_id) def set_user_enabled(self, context, user_id, user): return self.update_user(context, user_id, user) def set_user_password(self, context, user_id, user): user_ref = self.update_user(context, user_id, user) try: for token_id in self.token_api.list_tokens(context, user_id): self.token_api.delete_token(context, token_id) except exception.NotImplemented: # The password has been changed but tokens remain valid for # backends that can't list tokens for users LOG.warning('Password changed for %s, but existing tokens remain ' 'valid' % user_id) return user_ref def update_user_tenant(self, context, user_id, user): """Update the default tenant.""" # ensure that we're a member of that tenant tenant_id = user.get('tenantId') self.identity_api.add_user_to_tenant(context, tenant_id, user_id) return self.update_user(context, user_id, user) class RoleController(wsgi.Application): def __init__(self): self.identity_api = Manager() self.token_api = token.Manager() self.policy_api = policy.Manager() super(RoleController, self).__init__() # COMPAT(essex-3) def get_user_roles(self, context, user_id, tenant_id=None): """Get the roles for a user and tenant pair. Since we're trying to ignore the idea of user-only roles we're not implementing them in hopes that the idea will die off. 
""" if tenant_id is None: raise exception.NotImplemented(message='User roles not supported: ' 'tenant ID required') user = self.identity_api.get_user(context, user_id) if user is None: raise exception.UserNotFound(user_id=user_id) tenant = self.identity_api.get_tenant(context, tenant_id) if tenant is None: raise exception.TenantNotFound(tenant_id=tenant_id) roles = self.identity_api.get_roles_for_user_and_tenant( context, user_id, tenant_id) return {'roles': [self.identity_api.get_role(context, x) for x in roles]} # CRUD extension def get_role(self, context, role_id): self.assert_admin(context) role_ref = self.identity_api.get_role(context, role_id) if not role_ref: raise exception.RoleNotFound(role_id=role_id) return {'role': role_ref} def create_role(self, context, role): role = self._normalize_dict(role) self.assert_admin(context) role_id = uuid.uuid4().hex role['id'] = role_id role_ref = self.identity_api.create_role(context, role_id, role) return {'role': role_ref} def delete_role(self, context, role_id): self.assert_admin(context) self.get_role(context, role_id) self.identity_api.delete_role(context, role_id) def get_roles(self, context): self.assert_admin(context) roles = self.identity_api.list_roles(context) # TODO(termie): probably inefficient at some point return {'roles': roles} def add_role_to_user(self, context, user_id, role_id, tenant_id=None): """Add a role to a user and tenant pair. Since we're trying to ignore the idea of user-only roles we're not implementing them in hopes that the idea will die off. """ self.assert_admin(context) if tenant_id is None: raise exception.NotImplemented(message='User roles not supported: ' 'tenant_id required') if self.identity_api.get_user(context, user_id) is None: raise exception.UserNotFound(user_id=user_id) if self.identity_api.get_tenant(context, tenant_id) is None: raise exception.TenantNotFound(tenant_id=tenant_id) if self.identity_api.get_role(context, role_id) is None: raise exception.RoleNotFound(role_id=role_id) # This still has the weird legacy semantics that adding a role to # a user also adds them to a tenant self.identity_api.add_user_to_tenant(context, tenant_id, user_id) self.identity_api.add_role_to_user_and_tenant( context, user_id, tenant_id, role_id) role_ref = self.identity_api.get_role(context, role_id) return {'role': role_ref} def remove_role_from_user(self, context, user_id, role_id, tenant_id=None): """Remove a role from a user and tenant pair. Since we're trying to ignore the idea of user-only roles we're not implementing them in hopes that the idea will die off. 
""" self.assert_admin(context) if tenant_id is None: raise exception.NotImplemented(message='User roles not supported: ' 'tenant_id required') if self.identity_api.get_user(context, user_id) is None: raise exception.UserNotFound(user_id=user_id) if self.identity_api.get_tenant(context, tenant_id) is None: raise exception.TenantNotFound(tenant_id=tenant_id) if self.identity_api.get_role(context, role_id) is None: raise exception.RoleNotFound(role_id=role_id) # This still has the weird legacy semantics that adding a role to # a user also adds them to a tenant, so we must follow up on that self.identity_api.remove_role_from_user_and_tenant( context, user_id, tenant_id, role_id) roles = self.identity_api.get_roles_for_user_and_tenant( context, user_id, tenant_id) if not roles: self.identity_api.remove_user_from_tenant( context, tenant_id, user_id) return # COMPAT(diablo): CRUD extension def get_role_refs(self, context, user_id): """Ultimate hack to get around having to make role_refs first-class. This will basically iterate over the various roles the user has in all tenants the user is a member of and create fake role_refs where the id encodes the user-tenant-role information so we can look up the appropriate data when we need to delete them. """ self.assert_admin(context) user_ref = self.identity_api.get_user(context, user_id) tenant_ids = self.identity_api.get_tenants_for_user(context, user_id) o = [] for tenant_id in tenant_ids: role_ids = self.identity_api.get_roles_for_user_and_tenant( context, user_id, tenant_id) for role_id in role_ids: ref = {'roleId': role_id, 'tenantId': tenant_id, 'userId': user_id} ref['id'] = urllib.urlencode(ref) o.append(ref) return {'roles': o} # COMPAT(diablo): CRUD extension def create_role_ref(self, context, user_id, role): """This is actually used for adding a user to a tenant. In the legacy data model adding a user to a tenant required setting a role. """ self.assert_admin(context) # TODO(termie): for now we're ignoring the actual role tenant_id = role.get('tenantId') role_id = role.get('roleId') self.identity_api.add_user_to_tenant(context, tenant_id, user_id) self.identity_api.add_role_to_user_and_tenant( context, user_id, tenant_id, role_id) role_ref = self.identity_api.get_role(context, role_id) return {'role': role_ref} # COMPAT(diablo): CRUD extension def delete_role_ref(self, context, user_id, role_ref_id): """This is actually used for deleting a user from a tenant. In the legacy data model removing a user from a tenant required deleting a role. To emulate this, we encode the tenant and role in the role_ref_id, and if this happens to be the last role for the user-tenant pair, we remove the user from the tenant. """ self.assert_admin(context) # TODO(termie): for now we're ignoring the actual role role_ref_ref = urlparse.parse_qs(role_ref_id) tenant_id = role_ref_ref.get('tenantId')[0] role_id = role_ref_ref.get('roleId')[0] self.identity_api.remove_role_from_user_and_tenant( context, user_id, tenant_id, role_id) roles = self.identity_api.get_roles_for_user_and_tenant( context, user_id, tenant_id) if not roles: self.identity_api.remove_user_from_tenant( context, tenant_id, user_id)
./CrossVul/dataset_final_sorted/CWE-264/py/bad_3694_0
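# ---------------------------------------------------------------------------
# Illustration for the keystone record above: the marker/limit pagination in
# TenantController._format_tenant_list() starts the page immediately *after*
# the entry whose id matches the marker. A stand-alone sketch of that
# semantics follows; note that the record above slices with
# tenant_refs[page_idx:limit], i.e. it treats `limit` as an absolute end
# index, whereas this sketch treats it as a page size.
# ---------------------------------------------------------------------------

def paginate(refs, marker=None, limit=None):
    """Return the page of `refs` that starts just after `marker`."""
    start = 0
    if marker is not None:
        for idx, ref in enumerate(refs):
            if ref['id'] == marker:
                start = idx + 1
                break
        else:
            raise ValueError('Marker could not be found')
    end = None if limit is None else start + limit
    return refs[start:end]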
crossvul-python_data_good_2042_0
from django.core.exceptions import SuspiciousOperation


class DisallowedModelAdminLookup(SuspiciousOperation):
    """Invalid filter was passed to admin view via URL querystring"""
    pass


class DisallowedModelAdminToField(SuspiciousOperation):
    """Invalid to_field was passed to admin view via URL query string"""
    pass
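# ---------------------------------------------------------------------------
# Illustration: a hedged sketch of how these exceptions are intended to be
# raised. The ALLOWED_LOOKUPS whitelist and the helper below are
# hypothetical (the real check lives in ModelAdmin); the point is that,
# because both classes subclass SuspiciousOperation, modern Django converts
# an uncaught raise into an HTTP 400 response instead of executing the
# disallowed query.
# ---------------------------------------------------------------------------

ALLOWED_LOOKUPS = {'name__exact', 'created__gte'}  # illustrative whitelist


def validate_changelist_lookups(params):
    """Reject admin changelist filters that are not explicitly allowed."""
    for lookup in params:
        if lookup not in ALLOWED_LOOKUPS:
            raise DisallowedModelAdminLookup(
                "Filtering by %s not allowed" % lookup)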
./CrossVul/dataset_final_sorted/CWE-264/py/good_2042_0
crossvul-python_data_bad_3634_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Cloud Controller: Implementation of EC2 REST API calls, which are dispatched to other nodes via AMQP RPC. State is via distributed datastore. """ import base64 import re import time import urllib from nova.api.ec2 import ec2utils from nova.api.ec2 import inst_state from nova.api import validator from nova import block_device from nova import compute from nova.compute import instance_types from nova.compute import vm_states from nova import crypto from nova import db from nova import exception from nova import flags from nova.image import s3 from nova import log as logging from nova import network from nova.rpc import common as rpc_common from nova import utils from nova import volume FLAGS = flags.FLAGS flags.DECLARE('dhcp_domain', 'nova.network.manager') LOG = logging.getLogger(__name__) def validate_ec2_id(val): if not validator.validate_str()(val): raise exception.InvalidInstanceIDMalformed(val) try: ec2utils.ec2_id_to_id(val) except exception.InvalidEc2Id: raise exception.InvalidInstanceIDMalformed(val) def _gen_key(context, user_id, key_name): """Generate a key This is a module level method because it is slow and we need to defer it into a process pool.""" # NOTE(vish): generating key pair is slow so check for legal # creation before creating key_pair try: db.key_pair_get(context, user_id, key_name) raise exception.KeyPairExists(key_name=key_name) except exception.NotFound: pass private_key, public_key, fingerprint = crypto.generate_key_pair() key = {} key['user_id'] = user_id key['name'] = key_name key['public_key'] = public_key key['fingerprint'] = fingerprint db.key_pair_create(context, key) return {'private_key': private_key, 'fingerprint': fingerprint} # EC2 API can return the following values as documented in the EC2 API # http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ # ApiReference-ItemType-InstanceStateType.html # pending 0 | running 16 | shutting-down 32 | terminated 48 | stopping 64 | # stopped 80 _STATE_DESCRIPTION_MAP = { None: inst_state.PENDING, vm_states.ACTIVE: inst_state.RUNNING, vm_states.BUILDING: inst_state.PENDING, vm_states.REBUILDING: inst_state.PENDING, vm_states.DELETED: inst_state.TERMINATED, vm_states.SOFT_DELETE: inst_state.TERMINATED, vm_states.STOPPED: inst_state.STOPPED, vm_states.SHUTOFF: inst_state.SHUTOFF, vm_states.MIGRATING: inst_state.MIGRATE, vm_states.RESIZING: inst_state.RESIZE, vm_states.PAUSED: inst_state.PAUSE, vm_states.SUSPENDED: inst_state.SUSPEND, vm_states.RESCUED: inst_state.RESCUE, } def _state_description(vm_state, shutdown_terminate): """Map the vm state to the server status string""" if (vm_state == vm_states.SHUTOFF and not shutdown_terminate): name = inst_state.STOPPED else: name = _STATE_DESCRIPTION_MAP.get(vm_state, vm_state) return {'code': inst_state.name_to_code(name), 'name': 
name} def _parse_block_device_mapping(bdm): """Parse BlockDeviceMappingItemType into flat hash BlockDevicedMapping.<N>.DeviceName BlockDevicedMapping.<N>.Ebs.SnapshotId BlockDevicedMapping.<N>.Ebs.VolumeSize BlockDevicedMapping.<N>.Ebs.DeleteOnTermination BlockDevicedMapping.<N>.Ebs.NoDevice BlockDevicedMapping.<N>.VirtualName => remove .Ebs and allow volume id in SnapshotId """ ebs = bdm.pop('ebs', None) if ebs: ec2_id = ebs.pop('snapshot_id', None) if ec2_id: id = ec2utils.ec2_id_to_id(ec2_id) if ec2_id.startswith('snap-'): bdm['snapshot_id'] = id elif ec2_id.startswith('vol-'): bdm['volume_id'] = id ebs.setdefault('delete_on_termination', True) bdm.update(ebs) return bdm def _properties_get_mappings(properties): return block_device.mappings_prepend_dev(properties.get('mappings', [])) def _format_block_device_mapping(bdm): """Contruct BlockDeviceMappingItemType {'device_name': '...', 'snapshot_id': , ...} => BlockDeviceMappingItemType """ keys = (('deviceName', 'device_name'), ('virtualName', 'virtual_name')) item = {} for name, k in keys: if k in bdm: item[name] = bdm[k] if bdm.get('no_device'): item['noDevice'] = True if ('snapshot_id' in bdm) or ('volume_id' in bdm): ebs_keys = (('snapshotId', 'snapshot_id'), ('snapshotId', 'volume_id'), # snapshotId is abused ('volumeSize', 'volume_size'), ('deleteOnTermination', 'delete_on_termination')) ebs = {} for name, k in ebs_keys: if k in bdm: if k == 'snapshot_id': ebs[name] = ec2utils.id_to_ec2_snap_id(bdm[k]) elif k == 'volume_id': ebs[name] = ec2utils.id_to_ec2_vol_id(bdm[k]) else: ebs[name] = bdm[k] assert 'snapshotId' in ebs item['ebs'] = ebs return item def _format_mappings(properties, result): """Format multiple BlockDeviceMappingItemType""" mappings = [{'virtualName': m['virtual'], 'deviceName': m['device']} for m in _properties_get_mappings(properties) if block_device.is_swap_or_ephemeral(m['virtual'])] block_device_mapping = [_format_block_device_mapping(bdm) for bdm in properties.get('block_device_mapping', [])] # NOTE(yamahata): overwrite mappings with block_device_mapping for bdm in block_device_mapping: for i in range(len(mappings)): if bdm['deviceName'] == mappings[i]['deviceName']: del mappings[i] break mappings.append(bdm) # NOTE(yamahata): trim ebs.no_device == true. Is this necessary? mappings = [bdm for bdm in mappings if not (bdm.get('noDevice', False))] if mappings: result['blockDeviceMapping'] = mappings class CloudController(object): """ CloudController provides the critical dispatch between inbound API calls through the endpoint and messages sent to the other nodes. 
""" def __init__(self): self.image_service = s3.S3ImageService() self.network_api = network.API() self.volume_api = volume.API() self.compute_api = compute.API(network_api=self.network_api, volume_api=self.volume_api) self.sgh = utils.import_object(FLAGS.security_group_handler) def __str__(self): return 'CloudController' def _get_image_state(self, image): # NOTE(vish): fallback status if image_state isn't set state = image.get('status') if state == 'active': state = 'available' return image['properties'].get('image_state', state) def describe_availability_zones(self, context, **kwargs): if ('zone_name' in kwargs and 'verbose' in kwargs['zone_name'] and context.is_admin): return self._describe_availability_zones_verbose(context, **kwargs) else: return self._describe_availability_zones(context, **kwargs) def _describe_availability_zones(self, context, **kwargs): ctxt = context.elevated() enabled_services = db.service_get_all(ctxt, False) disabled_services = db.service_get_all(ctxt, True) available_zones = [] for zone in [service.availability_zone for service in enabled_services]: if not zone in available_zones: available_zones.append(zone) not_available_zones = [] for zone in [service.availability_zone for service in disabled_services if not service['availability_zone'] in available_zones]: if not zone in not_available_zones: not_available_zones.append(zone) result = [] for zone in available_zones: result.append({'zoneName': zone, 'zoneState': "available"}) for zone in not_available_zones: result.append({'zoneName': zone, 'zoneState': "not available"}) return {'availabilityZoneInfo': result} def _describe_availability_zones_verbose(self, context, **kwargs): rv = {'availabilityZoneInfo': [{'zoneName': 'nova', 'zoneState': 'available'}]} services = db.service_get_all(context, False) hosts = [] for host in [service['host'] for service in services]: if not host in hosts: hosts.append(host) for host in hosts: rv['availabilityZoneInfo'].append({'zoneName': '|- %s' % host, 'zoneState': ''}) hsvcs = [service for service in services if service['host'] == host] for svc in hsvcs: alive = utils.service_is_up(svc) art = (alive and ":-)") or "XXX" active = 'enabled' if svc['disabled']: active = 'disabled' rv['availabilityZoneInfo'].append({ 'zoneName': '| |- %s' % svc['binary'], 'zoneState': '%s %s %s' % (active, art, svc['updated_at'])}) return rv def describe_regions(self, context, region_name=None, **kwargs): if FLAGS.region_list: regions = [] for region in FLAGS.region_list: name, _sep, host = region.partition('=') endpoint = '%s://%s:%s%s' % (FLAGS.ec2_scheme, host, FLAGS.ec2_port, FLAGS.ec2_path) regions.append({'regionName': name, 'regionEndpoint': endpoint}) else: regions = [{'regionName': 'nova', 'regionEndpoint': '%s://%s:%s%s' % (FLAGS.ec2_scheme, FLAGS.ec2_host, FLAGS.ec2_port, FLAGS.ec2_path)}] return {'regionInfo': regions} def describe_snapshots(self, context, snapshot_id=None, owner=None, restorable_by=None, **kwargs): if snapshot_id: snapshots = [] for ec2_id in snapshot_id: internal_id = ec2utils.ec2_id_to_id(ec2_id) snapshot = self.volume_api.get_snapshot( context, snapshot_id=internal_id) snapshots.append(snapshot) else: snapshots = self.volume_api.get_all_snapshots(context) snapshots = [self._format_snapshot(context, s) for s in snapshots] return {'snapshotSet': snapshots} def _format_snapshot(self, context, snapshot): s = {} s['snapshotId'] = ec2utils.id_to_ec2_snap_id(snapshot['id']) s['volumeId'] = ec2utils.id_to_ec2_vol_id(snapshot['volume_id']) s['status'] = snapshot['status'] 
s['startTime'] = snapshot['created_at'] s['progress'] = snapshot['progress'] s['ownerId'] = snapshot['project_id'] s['volumeSize'] = snapshot['volume_size'] s['description'] = snapshot['display_description'] return s def create_snapshot(self, context, volume_id, **kwargs): validate_ec2_id(volume_id) LOG.audit(_("Create snapshot of volume %s"), volume_id, context=context) volume_id = ec2utils.ec2_id_to_id(volume_id) volume = self.volume_api.get(context, volume_id) snapshot = self.volume_api.create_snapshot( context, volume, None, kwargs.get('description')) return self._format_snapshot(context, snapshot) def delete_snapshot(self, context, snapshot_id, **kwargs): snapshot_id = ec2utils.ec2_id_to_id(snapshot_id) snapshot = self.volume_api.get_snapshot(context, snapshot_id) self.volume_api.delete_snapshot(context, snapshot) return True def describe_key_pairs(self, context, key_name=None, **kwargs): key_pairs = db.key_pair_get_all_by_user(context, context.user_id) if not key_name is None: key_pairs = [x for x in key_pairs if x['name'] in key_name] result = [] for key_pair in key_pairs: # filter out the vpn keys suffix = FLAGS.vpn_key_suffix if context.is_admin or not key_pair['name'].endswith(suffix): result.append({ 'keyName': key_pair['name'], 'keyFingerprint': key_pair['fingerprint'], }) return {'keySet': result} def create_key_pair(self, context, key_name, **kwargs): if not re.match('^[a-zA-Z0-9_\- ]+$', str(key_name)): err = _("Value (%s) for KeyName is invalid." " Content limited to Alphanumeric character, " "spaces, dashes, and underscore.") % key_name raise exception.EC2APIError(err) if len(str(key_name)) > 255: err = _("Value (%s) for Keyname is invalid." " Length exceeds maximum of 255.") % key_name raise exception.EC2APIError(err) LOG.audit(_("Create key pair %s"), key_name, context=context) data = _gen_key(context, context.user_id, key_name) return {'keyName': key_name, 'keyFingerprint': data['fingerprint'], 'keyMaterial': data['private_key']} # TODO(vish): when context is no longer an object, pass it here def import_key_pair(self, context, key_name, public_key_material, **kwargs): LOG.audit(_("Import key %s"), key_name, context=context) try: db.key_pair_get(context, context.user_id, key_name) raise exception.KeyPairExists(key_name=key_name) except exception.NotFound: pass public_key = base64.b64decode(public_key_material) fingerprint = crypto.generate_fingerprint(public_key) key = {} key['user_id'] = context.user_id key['name'] = key_name key['public_key'] = public_key key['fingerprint'] = fingerprint db.key_pair_create(context, key) return {'keyName': key_name, 'keyFingerprint': fingerprint} def delete_key_pair(self, context, key_name, **kwargs): LOG.audit(_("Delete key pair %s"), key_name, context=context) try: db.key_pair_destroy(context, context.user_id, key_name) except exception.NotFound: # aws returns true even if the key doesn't exist pass return True def describe_security_groups(self, context, group_name=None, group_id=None, **kwargs): self.compute_api.ensure_default_security_group(context) if group_name or group_id: groups = [] if group_name: for name in group_name: group = db.security_group_get_by_name(context, context.project_id, name) groups.append(group) if group_id: for gid in group_id: group = db.security_group_get(context, gid) groups.append(group) elif context.is_admin: groups = db.security_group_get_all(context) else: groups = db.security_group_get_by_project(context, context.project_id) groups = [self._format_security_group(context, g) for g in groups] return 
{'securityGroupInfo': list(sorted(groups, key=lambda k: (k['ownerId'], k['groupName'])))} def _format_security_group(self, context, group): g = {} g['groupDescription'] = group.description g['groupName'] = group.name g['ownerId'] = group.project_id g['ipPermissions'] = [] for rule in group.rules: r = {} r['groups'] = [] r['ipRanges'] = [] if rule.group_id: source_group = db.security_group_get(context, rule.group_id) r['groups'] += [{'groupName': source_group.name, 'userId': source_group.project_id}] if rule.protocol: r['ipProtocol'] = rule.protocol r['fromPort'] = rule.from_port r['toPort'] = rule.to_port g['ipPermissions'] += [dict(r)] else: for protocol, min_port, max_port in (('icmp', -1, -1), ('tcp', 1, 65535), ('udp', 1, 65535)): r['ipProtocol'] = protocol r['fromPort'] = min_port r['toPort'] = max_port g['ipPermissions'] += [dict(r)] else: r['ipProtocol'] = rule.protocol r['fromPort'] = rule.from_port r['toPort'] = rule.to_port r['ipRanges'] += [{'cidrIp': rule.cidr}] g['ipPermissions'] += [r] return g def _rule_args_to_dict(self, context, kwargs): rules = [] if not 'groups' in kwargs and not 'ip_ranges' in kwargs: rule = self._rule_dict_last_step(context, **kwargs) if rule: rules.append(rule) return rules if 'ip_ranges' in kwargs: rules = self._cidr_args_split(kwargs) else: rules = [kwargs] finalset = [] for rule in rules: if 'groups' in rule: groups_values = self._groups_args_split(rule) for groups_value in groups_values: final = self._rule_dict_last_step(context, **groups_value) finalset.append(final) else: final = self._rule_dict_last_step(context, **rule) finalset.append(final) return finalset def _cidr_args_split(self, kwargs): cidr_args_split = [] cidrs = kwargs['ip_ranges'] for key, cidr in cidrs.iteritems(): mykwargs = kwargs.copy() del mykwargs['ip_ranges'] mykwargs['cidr_ip'] = cidr['cidr_ip'] cidr_args_split.append(mykwargs) return cidr_args_split def _groups_args_split(self, kwargs): groups_args_split = [] groups = kwargs['groups'] for key, group in groups.iteritems(): mykwargs = kwargs.copy() del mykwargs['groups'] if 'group_name' in group: mykwargs['source_security_group_name'] = group['group_name'] if 'user_id' in group: mykwargs['source_security_group_owner_id'] = group['user_id'] if 'group_id' in group: mykwargs['source_security_group_id'] = group['group_id'] groups_args_split.append(mykwargs) return groups_args_split def _rule_dict_last_step(self, context, to_port=None, from_port=None, ip_protocol=None, cidr_ip=None, user_id=None, source_security_group_name=None, source_security_group_owner_id=None): values = {} if source_security_group_name: source_project_id = self._get_source_project_id(context, source_security_group_owner_id) source_security_group = db.security_group_get_by_name( context.elevated(), source_project_id, source_security_group_name) notfound = exception.SecurityGroupNotFound if not source_security_group: raise notfound(security_group_id=source_security_group_name) values['group_id'] = source_security_group['id'] elif cidr_ip: # If this fails, it throws an exception. This is what we want. cidr_ip = urllib.unquote(cidr_ip).decode() if not utils.is_valid_cidr(cidr_ip): # Raise exception for non-valid address raise exception.EC2APIError(_("Invalid CIDR")) values['cidr'] = cidr_ip else: values['cidr'] = '0.0.0.0/0' if source_security_group_name: # Open everything if an explicit port range or type/code are not # specified, but only if a source group was specified. 
ip_proto_upper = ip_protocol.upper() if ip_protocol else '' if (ip_proto_upper == 'ICMP' and from_port is None and to_port is None): from_port = -1 to_port = -1 elif (ip_proto_upper in ['TCP', 'UDP'] and from_port is None and to_port is None): from_port = 1 to_port = 65535 if ip_protocol and from_port is not None and to_port is not None: ip_protocol = str(ip_protocol) try: # Verify integer conversions from_port = int(from_port) to_port = int(to_port) except ValueError: if ip_protocol.upper() == 'ICMP': raise exception.InvalidInput(reason="Type and" " Code must be integers for ICMP protocol type") else: raise exception.InvalidInput(reason="To and From ports " "must be integers") if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']: raise exception.InvalidIpProtocol(protocol=ip_protocol) # Verify that from_port must always be less than # or equal to to_port if (ip_protocol.upper() in ['TCP', 'UDP'] and (from_port > to_port)): raise exception.InvalidPortRange(from_port=from_port, to_port=to_port, msg="Former value cannot" " be greater than the later") # Verify valid TCP, UDP port ranges if (ip_protocol.upper() in ['TCP', 'UDP'] and (from_port < 1 or to_port > 65535)): raise exception.InvalidPortRange(from_port=from_port, to_port=to_port, msg="Valid TCP ports should" " be between 1-65535") # Verify ICMP type and code if (ip_protocol.upper() == "ICMP" and (from_port < -1 or from_port > 255 or to_port < -1 or to_port > 255)): raise exception.InvalidPortRange(from_port=from_port, to_port=to_port, msg="For ICMP, the" " type:code must be valid") values['protocol'] = ip_protocol values['from_port'] = from_port values['to_port'] = to_port else: # If cidr based filtering, protocol and ports are mandatory if 'cidr' in values: return None return values def _security_group_rule_exists(self, security_group, values): """Indicates whether the specified rule values are already defined in the given security group. 
""" for rule in security_group.rules: is_duplicate = True keys = ('group_id', 'cidr', 'from_port', 'to_port', 'protocol') for key in keys: if rule.get(key) != values.get(key): is_duplicate = False break if is_duplicate: return rule['id'] return False def revoke_security_group_ingress(self, context, group_name=None, group_id=None, **kwargs): if not group_name and not group_id: err = _("Not enough parameters, need group_name or group_id") raise exception.EC2APIError(err) self.compute_api.ensure_default_security_group(context) notfound = exception.SecurityGroupNotFound if group_name: security_group = db.security_group_get_by_name(context, context.project_id, group_name) if not security_group: raise notfound(security_group_id=group_name) if group_id: security_group = db.security_group_get(context, group_id) if not security_group: raise notfound(security_group_id=group_id) msg = _("Revoke security group ingress %s") LOG.audit(msg, security_group['name'], context=context) prevalues = [] try: prevalues = kwargs['ip_permissions'] except KeyError: prevalues.append(kwargs) rule_id = None rule_ids = [] for values in prevalues: rulesvalues = self._rule_args_to_dict(context, values) if not rulesvalues: err = _("%s Not enough parameters to build a valid rule") raise exception.EC2APIError(err % rulesvalues) for values_for_rule in rulesvalues: values_for_rule['parent_group_id'] = security_group.id rule_id = self._security_group_rule_exists(security_group, values_for_rule) if rule_id: db.security_group_rule_destroy(context, rule_id) rule_ids.append(rule_id) if rule_id: # NOTE(vish): we removed a rule, so refresh self.compute_api.trigger_security_group_rules_refresh( context, security_group_id=security_group['id']) self.sgh.trigger_security_group_rule_destroy_refresh( context, rule_ids) return True raise exception.EC2APIError(_("No rule for the specified parameters.")) # TODO(soren): This has only been tested with Boto as the client. # Unfortunately, it seems Boto is using an old API # for these operations, so support for newer API versions # is sketchy. 
def authorize_security_group_ingress(self, context, group_name=None, group_id=None, **kwargs): if not group_name and not group_id: err = _("Not enough parameters, need group_name or group_id") raise exception.EC2APIError(err) self.compute_api.ensure_default_security_group(context) notfound = exception.SecurityGroupNotFound if group_name: security_group = db.security_group_get_by_name(context, context.project_id, group_name) if not security_group: raise notfound(security_group_id=group_name) if group_id: security_group = db.security_group_get(context, group_id) if not security_group: raise notfound(security_group_id=group_id) msg = _("Authorize security group ingress %s") LOG.audit(msg, security_group['name'], context=context) prevalues = [] try: prevalues = kwargs['ip_permissions'] except KeyError: prevalues.append(kwargs) postvalues = [] for values in prevalues: rulesvalues = self._rule_args_to_dict(context, values) if not rulesvalues: err = _("%s Not enough parameters to build a valid rule") raise exception.EC2APIError(err % rulesvalues) for values_for_rule in rulesvalues: values_for_rule['parent_group_id'] = security_group.id if self._security_group_rule_exists(security_group, values_for_rule): err = _('%s - This rule already exists in group') raise exception.EC2APIError(err % values_for_rule) postvalues.append(values_for_rule) rule_ids = [] for values_for_rule in postvalues: security_group_rule = db.security_group_rule_create( context, values_for_rule) rule_ids.append(security_group_rule['id']) if postvalues: self.compute_api.trigger_security_group_rules_refresh( context, security_group_id=security_group['id']) self.sgh.trigger_security_group_rule_create_refresh( context, rule_ids) return True raise exception.EC2APIError(_("No rule for the specified parameters.")) def _get_source_project_id(self, context, source_security_group_owner_id): if source_security_group_owner_id: # Parse user:project for source group. source_parts = source_security_group_owner_id.split(':') # If no project name specified, assume it's same as user name. # Since we're looking up by project name, the user name is not # used here. It's only read for EC2 API compatibility. if len(source_parts) == 2: source_project_id = source_parts[1] else: source_project_id = source_parts[0] else: source_project_id = context.project_id return source_project_id def create_security_group(self, context, group_name, group_description): if not re.match('^[a-zA-Z0-9_\- ]+$', str(group_name)): # Some validation to ensure that values match API spec. # - Alphanumeric characters, spaces, dashes, and underscores. # TODO(Daviey): LP: #813685 extend beyond group_name checking, and # probably create a param validator that can be used elsewhere. err = _("Value (%s) for parameter GroupName is invalid." " Content limited to Alphanumeric characters, " "spaces, dashes, and underscores.") % group_name # err not that of master ec2 implementation, as they fail to raise. raise exception.InvalidParameterValue(err=err) if len(str(group_name)) > 255: err = _("Value (%s) for parameter GroupName is invalid." 
" Length exceeds maximum of 255.") % group_name raise exception.InvalidParameterValue(err=err) LOG.audit(_("Create Security Group %s"), group_name, context=context) self.compute_api.ensure_default_security_group(context) if db.security_group_exists(context, context.project_id, group_name): msg = _('group %s already exists') raise exception.EC2APIError(msg % group_name) group = {'user_id': context.user_id, 'project_id': context.project_id, 'name': group_name, 'description': group_description} group_ref = db.security_group_create(context, group) self.sgh.trigger_security_group_create_refresh(context, group) return {'securityGroupSet': [self._format_security_group(context, group_ref)]} def delete_security_group(self, context, group_name=None, group_id=None, **kwargs): if not group_name and not group_id: err = _("Not enough parameters, need group_name or group_id") raise exception.EC2APIError(err) notfound = exception.SecurityGroupNotFound if group_name: security_group = db.security_group_get_by_name(context, context.project_id, group_name) if not security_group: raise notfound(security_group_id=group_name) elif group_id: security_group = db.security_group_get(context, group_id) if not security_group: raise notfound(security_group_id=group_id) if db.security_group_in_use(context, security_group.id): raise exception.InvalidGroup(reason="In Use") LOG.audit(_("Delete security group %s"), group_name, context=context) db.security_group_destroy(context, security_group.id) self.sgh.trigger_security_group_destroy_refresh(context, security_group.id) return True def get_console_output(self, context, instance_id, **kwargs): LOG.audit(_("Get console output for instance %s"), instance_id, context=context) # instance_id may be passed in as a list of instances if isinstance(instance_id, list): ec2_id = instance_id[0] else: ec2_id = instance_id validate_ec2_id(ec2_id) instance_id = ec2utils.ec2_id_to_id(ec2_id) instance = self.compute_api.get(context, instance_id) output = self.compute_api.get_console_output(context, instance) now = utils.utcnow() return {"InstanceId": ec2_id, "Timestamp": now, "output": base64.b64encode(output)} def describe_volumes(self, context, volume_id=None, **kwargs): if volume_id: volumes = [] for ec2_id in volume_id: validate_ec2_id(ec2_id) internal_id = ec2utils.ec2_id_to_id(ec2_id) volume = self.volume_api.get(context, internal_id) volumes.append(volume) else: volumes = self.volume_api.get_all(context) volumes = [self._format_volume(context, v) for v in volumes] return {'volumeSet': volumes} def _format_volume(self, context, volume): instance_ec2_id = None instance_data = None if volume.get('instance', None): instance_id = volume['instance']['id'] instance_ec2_id = ec2utils.id_to_ec2_id(instance_id) instance_data = '%s[%s]' % (instance_ec2_id, volume['instance']['host']) v = {} v['volumeId'] = ec2utils.id_to_ec2_vol_id(volume['id']) v['status'] = volume['status'] v['size'] = volume['size'] v['availabilityZone'] = volume['availability_zone'] v['createTime'] = volume['created_at'] if context.is_admin: v['status'] = '%s (%s, %s, %s, %s)' % ( volume['status'], volume['project_id'], volume['host'], instance_data, volume['mountpoint']) if volume['attach_status'] == 'attached': v['attachmentSet'] = [{'attachTime': volume['attach_time'], 'deleteOnTermination': False, 'device': volume['mountpoint'], 'instanceId': instance_ec2_id, 'status': 'attached', 'volumeId': v['volumeId']}] else: v['attachmentSet'] = [{}] if volume.get('snapshot_id') is not None: v['snapshotId'] = 
ec2utils.id_to_ec2_snap_id(volume['snapshot_id']) else: v['snapshotId'] = None return v def create_volume(self, context, **kwargs): size = kwargs.get('size') if kwargs.get('snapshot_id') is not None: snapshot_id = ec2utils.ec2_id_to_id(kwargs['snapshot_id']) snapshot = self.volume_api.get_snapshot(context, snapshot_id) LOG.audit(_("Create volume from snapshot %s"), snapshot_id, context=context) else: snapshot = None LOG.audit(_("Create volume of %s GB"), size, context=context) availability_zone = kwargs.get('availability_zone', None) volume = self.volume_api.create(context, size, None, None, snapshot, availability_zone=availability_zone) # TODO(vish): Instance should be None at db layer instead of # trying to lazy load, but for now we turn it into # a dict to avoid an error. return self._format_volume(context, dict(volume)) def delete_volume(self, context, volume_id, **kwargs): validate_ec2_id(volume_id) volume_id = ec2utils.ec2_id_to_id(volume_id) try: volume = self.volume_api.get(context, volume_id) self.volume_api.delete(context, volume) except exception.InvalidVolume: raise exception.EC2APIError(_('Delete Failed')) return True def attach_volume(self, context, volume_id, instance_id, device, **kwargs): validate_ec2_id(instance_id) validate_ec2_id(volume_id) volume_id = ec2utils.ec2_id_to_id(volume_id) instance_id = ec2utils.ec2_id_to_id(instance_id) instance = self.compute_api.get(context, instance_id) msg = _("Attach volume %(volume_id)s to instance %(instance_id)s" " at %(device)s") % locals() LOG.audit(msg, context=context) try: self.compute_api.attach_volume(context, instance, volume_id, device) except exception.InvalidVolume: raise exception.EC2APIError(_('Attach Failed.')) volume = self.volume_api.get(context, volume_id) return {'attachTime': volume['attach_time'], 'device': volume['mountpoint'], 'instanceId': ec2utils.id_to_ec2_id(instance_id), 'requestId': context.request_id, 'status': volume['attach_status'], 'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)} def detach_volume(self, context, volume_id, **kwargs): validate_ec2_id(volume_id) volume_id = ec2utils.ec2_id_to_id(volume_id) LOG.audit(_("Detach volume %s"), volume_id, context=context) volume = self.volume_api.get(context, volume_id) try: instance = self.compute_api.detach_volume(context, volume_id=volume_id) except exception.InvalidVolume: raise exception.EC2APIError(_('Detach Volume Failed.')) return {'attachTime': volume['attach_time'], 'device': volume['mountpoint'], 'instanceId': ec2utils.id_to_ec2_id(instance['id']), 'requestId': context.request_id, 'status': volume['attach_status'], 'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)} def _format_kernel_id(self, context, instance_ref, result, key): kernel_uuid = instance_ref['kernel_id'] if kernel_uuid is None or kernel_uuid == '': return result[key] = ec2utils.glance_id_to_ec2_id(context, kernel_uuid, 'aki') def _format_ramdisk_id(self, context, instance_ref, result, key): ramdisk_uuid = instance_ref['ramdisk_id'] if ramdisk_uuid is None or ramdisk_uuid == '': return result[key] = ec2utils.glance_id_to_ec2_id(context, ramdisk_uuid, 'ari') def describe_instance_attribute(self, context, instance_id, attribute, **kwargs): def _unsupported_attribute(instance, result): raise exception.EC2APIError(_('attribute not supported: %s') % attribute) def _format_attr_block_device_mapping(instance, result): tmp = {} self._format_instance_root_device_name(instance, tmp) self._format_instance_bdm(context, instance_id, tmp['rootDeviceName'], result) def 
_format_attr_disable_api_termination(instance, result): result['disableApiTermination'] = instance['disable_terminate'] def _format_attr_group_set(instance, result): CloudController._format_group_set(instance, result) def _format_attr_instance_initiated_shutdown_behavior(instance, result): if instance['shutdown_terminate']: result['instanceInitiatedShutdownBehavior'] = 'terminate' else: result['instanceInitiatedShutdownBehavior'] = 'stop' def _format_attr_instance_type(instance, result): self._format_instance_type(instance, result) def _format_attr_kernel(instance, result): self._format_kernel_id(context, instance, result, 'kernel') def _format_attr_ramdisk(instance, result): self._format_ramdisk_id(context, instance, result, 'ramdisk') def _format_attr_root_device_name(instance, result): self._format_instance_root_device_name(instance, result) def _format_attr_source_dest_check(instance, result): _unsupported_attribute(instance, result) def _format_attr_user_data(instance, result): result['userData'] = base64.b64decode(instance['user_data']) attribute_formatter = { 'blockDeviceMapping': _format_attr_block_device_mapping, 'disableApiTermination': _format_attr_disable_api_termination, 'groupSet': _format_attr_group_set, 'instanceInitiatedShutdownBehavior': _format_attr_instance_initiated_shutdown_behavior, 'instanceType': _format_attr_instance_type, 'kernel': _format_attr_kernel, 'ramdisk': _format_attr_ramdisk, 'rootDeviceName': _format_attr_root_device_name, 'sourceDestCheck': _format_attr_source_dest_check, 'userData': _format_attr_user_data, } fn = attribute_formatter.get(attribute) if fn is None: raise exception.EC2APIError( _('attribute not supported: %s') % attribute) ec2_instance_id = instance_id validate_ec2_id(instance_id) instance_id = ec2utils.ec2_id_to_id(ec2_instance_id) instance = self.compute_api.get(context, instance_id) result = {'instance_id': ec2_instance_id} fn(instance, result) return result def describe_instances(self, context, **kwargs): # Optional DescribeInstances argument instance_id = kwargs.get('instance_id', None) return self._format_describe_instances(context, instance_id=instance_id) def describe_instances_v6(self, context, **kwargs): # Optional DescribeInstancesV6 argument instance_id = kwargs.get('instance_id', None) return self._format_describe_instances(context, instance_id=instance_id, use_v6=True) def _format_describe_instances(self, context, **kwargs): return {'reservationSet': self._format_instances(context, **kwargs)} def _format_run_instances(self, context, reservation_id): i = self._format_instances(context, reservation_id=reservation_id) assert len(i) == 1 return i[0] def _format_terminate_instances(self, context, instance_id, previous_states): instances_set = [] for (ec2_id, previous_state) in zip(instance_id, previous_states): i = {} i['instanceId'] = ec2_id i['previousState'] = _state_description(previous_state['vm_state'], previous_state['shutdown_terminate']) try: internal_id = ec2utils.ec2_id_to_id(ec2_id) instance = self.compute_api.get(context, internal_id) i['shutdownState'] = _state_description(instance['vm_state'], instance['shutdown_terminate']) except exception.NotFound: i['shutdownState'] = _state_description(vm_states.DELETED, True) instances_set.append(i) return {'instancesSet': instances_set} def _format_instance_bdm(self, context, instance_id, root_device_name, result): """Format InstanceBlockDeviceMappingResponseItemType""" root_device_type = 'instance-store' mapping = [] for bdm in 
db.block_device_mapping_get_all_by_instance(context, instance_id): volume_id = bdm['volume_id'] if (volume_id is None or bdm['no_device']): continue if (bdm['device_name'] == root_device_name and (bdm['snapshot_id'] or bdm['volume_id'])): assert not bdm['virtual_name'] root_device_type = 'ebs' vol = self.volume_api.get(context, volume_id) LOG.debug(_("vol = %s\n"), vol) # TODO(yamahata): volume attach time ebs = {'volumeId': volume_id, 'deleteOnTermination': bdm['delete_on_termination'], 'attachTime': vol['attach_time'] or '-', 'status': vol['status'], } res = {'deviceName': bdm['device_name'], 'ebs': ebs, } mapping.append(res) if mapping: result['blockDeviceMapping'] = mapping result['rootDeviceType'] = root_device_type @staticmethod def _format_instance_root_device_name(instance, result): result['rootDeviceName'] = (instance.get('root_device_name') or block_device.DEFAULT_ROOT_DEV_NAME) @staticmethod def _format_instance_type(instance, result): if instance['instance_type']: result['instanceType'] = instance['instance_type'].get('name') else: result['instanceType'] = None @staticmethod def _format_group_set(instance, result): security_group_names = [] if instance.get('security_groups'): for security_group in instance['security_groups']: security_group_names.append(security_group['name']) result['groupSet'] = utils.convert_to_list_dict( security_group_names, 'groupId') def _format_instances(self, context, instance_id=None, use_v6=False, **search_opts): # TODO(termie): this method is poorly named as its name does not imply # that it will be making a variety of database calls # rather than simply formatting a bunch of instances that # were handed to it reservations = {} # NOTE(vish): instance_id is an optional list of ids to filter by if instance_id: instances = [] for ec2_id in instance_id: internal_id = ec2utils.ec2_id_to_id(ec2_id) try: instance = self.compute_api.get(context, internal_id) except exception.NotFound: continue instances.append(instance) else: try: # always filter out deleted instances search_opts['deleted'] = False instances = self.compute_api.get_all(context, search_opts=search_opts, sort_dir='asc') except exception.NotFound: instances = [] for instance in instances: if not context.is_admin: if instance['image_ref'] == str(FLAGS.vpn_image_id): continue i = {} instance_id = instance['id'] ec2_id = ec2utils.id_to_ec2_id(instance_id) i['instanceId'] = ec2_id image_uuid = instance['image_ref'] i['imageId'] = ec2utils.glance_id_to_ec2_id(context, image_uuid) self._format_kernel_id(context, instance, i, 'kernelId') self._format_ramdisk_id(context, instance, i, 'ramdiskId') i['instanceState'] = _state_description( instance['vm_state'], instance['shutdown_terminate']) fixed_ip = None floating_ip = None ip_info = ec2utils.get_ip_info_for_instance(context, instance) if ip_info['fixed_ips']: fixed_ip = ip_info['fixed_ips'][0] if ip_info['floating_ips']: floating_ip = ip_info['floating_ips'][0] if ip_info['fixed_ip6s']: i['dnsNameV6'] = ip_info['fixed_ip6s'][0] if FLAGS.ec2_private_dns_show_ip: i['privateDnsName'] = fixed_ip else: i['privateDnsName'] = instance['hostname'] i['privateIpAddress'] = fixed_ip i['publicDnsName'] = floating_ip i['ipAddress'] = floating_ip or fixed_ip i['dnsName'] = i['publicDnsName'] or i['privateDnsName'] i['keyName'] = instance['key_name'] if context.is_admin: i['keyName'] = '%s (%s, %s)' % (i['keyName'], instance['project_id'], instance['host']) i['productCodesSet'] = utils.convert_to_list_dict([], 'product_codes') self._format_instance_type(instance, i) 
i['launchTime'] = instance['created_at'] i['amiLaunchIndex'] = instance['launch_index'] self._format_instance_root_device_name(instance, i) self._format_instance_bdm(context, instance_id, i['rootDeviceName'], i) host = instance['host'] services = db.service_get_all_by_host(context.elevated(), host) zone = ec2utils.get_availability_zone_by_host(services, host) i['placement'] = {'availabilityZone': zone} if instance['reservation_id'] not in reservations: r = {} r['reservationId'] = instance['reservation_id'] r['ownerId'] = instance['project_id'] self._format_group_set(instance, r) r['instancesSet'] = [] reservations[instance['reservation_id']] = r reservations[instance['reservation_id']]['instancesSet'].append(i) return list(reservations.values()) def describe_addresses(self, context, **kwargs): return self.format_addresses(context) def format_addresses(self, context): addresses = [] floaters = self.network_api.get_floating_ips_by_project(context) for floating_ip_ref in floaters: if floating_ip_ref['project_id'] is None: continue address = floating_ip_ref['address'] ec2_id = None if floating_ip_ref['fixed_ip_id']: fixed_id = floating_ip_ref['fixed_ip_id'] fixed = self.network_api.get_fixed_ip(context, fixed_id) if fixed['instance_id'] is not None: ec2_id = ec2utils.id_to_ec2_id(fixed['instance_id']) address_rv = {'public_ip': address, 'instance_id': ec2_id} if context.is_admin: details = "%s (%s)" % (address_rv['instance_id'], floating_ip_ref['project_id']) address_rv['instance_id'] = details addresses.append(address_rv) return {'addressesSet': addresses} def allocate_address(self, context, **kwargs): LOG.audit(_("Allocate address"), context=context) try: public_ip = self.network_api.allocate_floating_ip(context) return {'publicIp': public_ip} except rpc_common.RemoteError as ex: # NOTE(tr3buchet) - why does this block exist? 
if ex.exc_type == 'NoMoreFloatingIps': raise exception.NoMoreFloatingIps() else: raise def release_address(self, context, public_ip, **kwargs): LOG.audit(_("Release address %s"), public_ip, context=context) self.network_api.release_floating_ip(context, address=public_ip) return {'return': "true"} def associate_address(self, context, instance_id, public_ip, **kwargs): LOG.audit(_("Associate address %(public_ip)s to" " instance %(instance_id)s") % locals(), context=context) instance_id = ec2utils.ec2_id_to_id(instance_id) instance = self.compute_api.get(context, instance_id) self.compute_api.associate_floating_ip(context, instance, address=public_ip) return {'return': "true"} def disassociate_address(self, context, public_ip, **kwargs): LOG.audit(_("Disassociate address %s"), public_ip, context=context) self.network_api.disassociate_floating_ip(context, address=public_ip) return {'return': "true"} def run_instances(self, context, **kwargs): max_count = int(kwargs.get('max_count', 1)) if kwargs.get('kernel_id'): kernel = self._get_image(context, kwargs['kernel_id']) kwargs['kernel_id'] = ec2utils.id_to_glance_id(context, kernel['id']) if kwargs.get('ramdisk_id'): ramdisk = self._get_image(context, kwargs['ramdisk_id']) kwargs['ramdisk_id'] = ec2utils.id_to_glance_id(context, ramdisk['id']) for bdm in kwargs.get('block_device_mapping', []): _parse_block_device_mapping(bdm) image = self._get_image(context, kwargs['image_id']) image_uuid = ec2utils.id_to_glance_id(context, image['id']) if image: image_state = self._get_image_state(image) else: raise exception.ImageNotFound(image_id=kwargs['image_id']) if image_state != 'available': raise exception.EC2APIError(_('Image must be available')) (instances, resv_id) = self.compute_api.create(context, instance_type=instance_types.get_instance_type_by_name( kwargs.get('instance_type', None)), image_href=image_uuid, min_count=int(kwargs.get('min_count', max_count)), max_count=max_count, kernel_id=kwargs.get('kernel_id'), ramdisk_id=kwargs.get('ramdisk_id'), key_name=kwargs.get('key_name'), user_data=kwargs.get('user_data'), security_group=kwargs.get('security_group'), availability_zone=kwargs.get('placement', {}).get( 'availability_zone'), block_device_mapping=kwargs.get('block_device_mapping', {})) return self._format_run_instances(context, resv_id) def terminate_instances(self, context, instance_id, **kwargs): """Terminate each instance in instance_id, which is a list of ec2 ids. instance_id is a kwarg so its name cannot be modified.""" LOG.debug(_("Going to start terminating instances")) previous_states = [] for ec2_id in instance_id: validate_ec2_id(ec2_id) _instance_id = ec2utils.ec2_id_to_id(ec2_id) instance = self.compute_api.get(context, _instance_id) previous_states.append(instance) self.compute_api.delete(context, instance) return self._format_terminate_instances(context, instance_id, previous_states) def reboot_instances(self, context, instance_id, **kwargs): """instance_id is a list of instance ids""" LOG.audit(_("Reboot instance %r"), instance_id, context=context) for ec2_id in instance_id: validate_ec2_id(ec2_id) _instance_id = ec2utils.ec2_id_to_id(ec2_id) instance = self.compute_api.get(context, _instance_id) self.compute_api.reboot(context, instance, 'HARD') return True def stop_instances(self, context, instance_id, **kwargs): """Stop each instances in instance_id. 
Here instance_id is a list of instance ids""" LOG.debug(_("Going to stop instances")) for ec2_id in instance_id: validate_ec2_id(ec2_id) _instance_id = ec2utils.ec2_id_to_id(ec2_id) instance = self.compute_api.get(context, _instance_id) self.compute_api.stop(context, instance) return True def start_instances(self, context, instance_id, **kwargs): """Start each instances in instance_id. Here instance_id is a list of instance ids""" LOG.debug(_("Going to start instances")) for ec2_id in instance_id: validate_ec2_id(ec2_id) _instance_id = ec2utils.ec2_id_to_id(ec2_id) instance = self.compute_api.get(context, _instance_id) self.compute_api.start(context, instance) return True def _get_image(self, context, ec2_id): try: internal_id = ec2utils.ec2_id_to_id(ec2_id) image = self.image_service.show(context, internal_id) except (exception.InvalidEc2Id, exception.ImageNotFound): try: return self.image_service.show_by_name(context, ec2_id) except exception.NotFound: raise exception.ImageNotFound(image_id=ec2_id) image_type = ec2_id.split('-')[0] if ec2utils.image_type(image.get('container_format')) != image_type: raise exception.ImageNotFound(image_id=ec2_id) return image def _format_image(self, image): """Convert from format defined by GlanceImageService to S3 format.""" i = {} image_type = ec2utils.image_type(image.get('container_format')) ec2_id = ec2utils.image_ec2_id(image.get('id'), image_type) name = image.get('name') i['imageId'] = ec2_id kernel_id = image['properties'].get('kernel_id') if kernel_id: i['kernelId'] = ec2utils.image_ec2_id(kernel_id, 'aki') ramdisk_id = image['properties'].get('ramdisk_id') if ramdisk_id: i['ramdiskId'] = ec2utils.image_ec2_id(ramdisk_id, 'ari') if FLAGS.auth_strategy == 'deprecated': i['imageOwnerId'] = image['properties'].get('project_id') else: i['imageOwnerId'] = image.get('owner') img_loc = image['properties'].get('image_location') if img_loc: i['imageLocation'] = img_loc else: i['imageLocation'] = "%s (%s)" % (img_loc, name) i['name'] = name if not name and img_loc: # This should only occur for images registered with ec2 api # prior to that api populating the glance name i['name'] = img_loc i['imageState'] = self._get_image_state(image) i['description'] = image.get('description') display_mapping = {'aki': 'kernel', 'ari': 'ramdisk', 'ami': 'machine'} i['imageType'] = display_mapping.get(image_type) i['isPublic'] = not not image.get('is_public') i['architecture'] = image['properties'].get('architecture') properties = image['properties'] root_device_name = block_device.properties_root_device_name(properties) root_device_type = 'instance-store' for bdm in properties.get('block_device_mapping', []): if (bdm.get('device_name') == root_device_name and ('snapshot_id' in bdm or 'volume_id' in bdm) and not bdm.get('no_device')): root_device_type = 'ebs' i['rootDeviceName'] = (root_device_name or block_device.DEFAULT_ROOT_DEV_NAME) i['rootDeviceType'] = root_device_type _format_mappings(properties, i) return i def describe_images(self, context, image_id=None, **kwargs): # NOTE: image_id is a list! 
if image_id: images = [] for ec2_id in image_id: try: image = self._get_image(context, ec2_id) except exception.NotFound: raise exception.ImageNotFound(image_id=ec2_id) images.append(image) else: images = self.image_service.detail(context) images = [self._format_image(i) for i in images] return {'imagesSet': images} def deregister_image(self, context, image_id, **kwargs): LOG.audit(_("De-registering image %s"), image_id, context=context) image = self._get_image(context, image_id) internal_id = image['id'] self.image_service.delete(context, internal_id) return {'imageId': image_id} def _register_image(self, context, metadata): image = self.image_service.create(context, metadata) image_type = ec2utils.image_type(image.get('container_format')) image_id = ec2utils.image_ec2_id(image['id'], image_type) return image_id def register_image(self, context, image_location=None, **kwargs): if image_location is None and kwargs.get('name'): image_location = kwargs['name'] if image_location is None: raise exception.EC2APIError(_('imageLocation is required')) metadata = {'properties': {'image_location': image_location}} if kwargs.get('name'): metadata['name'] = kwargs['name'] else: metadata['name'] = image_location if 'root_device_name' in kwargs: metadata['properties']['root_device_name'] = kwargs.get( 'root_device_name') mappings = [_parse_block_device_mapping(bdm) for bdm in kwargs.get('block_device_mapping', [])] if mappings: metadata['properties']['block_device_mapping'] = mappings image_id = self._register_image(context, metadata) msg = _("Registered image %(image_location)s with" " id %(image_id)s") % locals() LOG.audit(msg, context=context) return {'imageId': image_id} def describe_image_attribute(self, context, image_id, attribute, **kwargs): def _block_device_mapping_attribute(image, result): _format_mappings(image['properties'], result) def _launch_permission_attribute(image, result): result['launchPermission'] = [] if image['is_public']: result['launchPermission'].append({'group': 'all'}) def _root_device_name_attribute(image, result): _prop_root_dev_name = block_device.properties_root_device_name result['rootDeviceName'] = _prop_root_dev_name(image['properties']) if result['rootDeviceName'] is None: result['rootDeviceName'] = block_device.DEFAULT_ROOT_DEV_NAME supported_attributes = { 'blockDeviceMapping': _block_device_mapping_attribute, 'launchPermission': _launch_permission_attribute, 'rootDeviceName': _root_device_name_attribute, } fn = supported_attributes.get(attribute) if fn is None: raise exception.EC2APIError(_('attribute not supported: %s') % attribute) try: image = self._get_image(context, image_id) except exception.NotFound: raise exception.ImageNotFound(image_id=image_id) result = {'imageId': image_id} fn(image, result) return result def modify_image_attribute(self, context, image_id, attribute, operation_type, **kwargs): # TODO(devcamcar): Support users and groups other than 'all'. 
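        # Only image publicity is handled here: operation_type 'add' grants
        # the 'all' group launch permission (is_public=True) and 'remove'
        # revokes it.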
        if attribute != 'launchPermission':
            raise exception.EC2APIError(_('attribute not supported: %s')
                                        % attribute)
        if 'user_group' not in kwargs:
            raise exception.EC2APIError(_('user or group not specified'))
        if len(kwargs['user_group']) != 1 or kwargs['user_group'][0] != 'all':
            raise exception.EC2APIError(_('only group "all" is supported'))
        if operation_type not in ['add', 'remove']:
            msg = _('operation_type must be add or remove')
            raise exception.EC2APIError(msg)
        LOG.audit(_("Updating image %s publicity"), image_id, context=context)

        try:
            image = self._get_image(context, image_id)
        except exception.NotFound:
            raise exception.ImageNotFound(image_id=image_id)
        internal_id = image['id']
        del(image['id'])

        image['is_public'] = (operation_type == 'add')
        try:
            return self.image_service.update(context, internal_id, image)
        except exception.ImageNotAuthorized:
            msg = _('Not allowed to modify attributes for image %s')
            raise exception.EC2APIError(msg % image_id)

    def update_image(self, context, image_id, **kwargs):
        internal_id = ec2utils.ec2_id_to_id(image_id)
        result = self.image_service.update(context, internal_id,
                                           dict(kwargs))
        return result

    # TODO(yamahata): race condition
    # At the moment there is no way to prevent others from
    # manipulating instances/volumes/snapshots.
    # As other code doesn't take it into consideration, here we don't
    # care of it for now. Ostrich algorithm
    def create_image(self, context, instance_id, **kwargs):
        # NOTE(yamahata): name/description are ignored by register_image(),
        #                 do so here
        no_reboot = kwargs.get('no_reboot', False)
        validate_ec2_id(instance_id)
        ec2_instance_id = instance_id
        instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
        instance = self.compute_api.get(context, instance_id)

        # stop the instance if necessary
        restart_instance = False
        if not no_reboot:
            vm_state = instance['vm_state']

            # if the instance is in a subtle state, refuse to proceed.
            if vm_state not in (vm_states.ACTIVE, vm_states.SHUTOFF,
                                vm_states.STOPPED):
                raise exception.InstanceNotRunning(
                        instance_id=ec2_instance_id)

            if vm_state in (vm_states.ACTIVE, vm_states.SHUTOFF):
                restart_instance = True
                self.compute_api.stop(context, instance)

            # wait until the instance has really stopped
            start_time = time.time()
            while vm_state != vm_states.STOPPED:
                time.sleep(1)
                instance = self.compute_api.get(context, instance_id)
                vm_state = instance['vm_state']
                # NOTE(yamahata): timeout and error. 1 hour for now for
                #                 safety. Is it too short/long?
                #                 Or is there any better way?
                timeout = 1 * 60 * 60
                if time.time() > start_time + timeout:
                    raise exception.EC2APIError(
                        _('Couldn\'t stop instance within %d sec') % timeout)

        src_image = self._get_image(context, instance['image_ref'])
        properties = src_image['properties']
        if instance['root_device_name']:
            properties['root_device_name'] = instance['root_device_name']

        mapping = []
        bdms = db.block_device_mapping_get_all_by_instance(context,
                                                           instance_id)
        for bdm in bdms:
            if bdm.no_device:
                continue
            m = {}
            for attr in ('device_name', 'snapshot_id', 'volume_id',
                         'volume_size', 'delete_on_termination', 'no_device',
                         'virtual_name'):
                val = getattr(bdm, attr)
                if val is not None:
                    m[attr] = val

            volume_id = m.get('volume_id')
            if m.get('snapshot_id') and volume_id:
                # create snapshot based on volume_id
                volume = self.volume_api.get(context, volume_id)
                # NOTE(yamahata): Should we wait for snapshot creation?
                #                 Linux LVM snapshot creation completes in
                #                 short time, it doesn't matter for now.
                snapshot = self.volume_api.create_snapshot_force(
                        context, volume, volume['display_name'],
                        volume['display_description'])
                m['snapshot_id'] = snapshot['id']
                del m['volume_id']

            if m:
                mapping.append(m)

        for m in _properties_get_mappings(properties):
            virtual_name = m['virtual']
            if virtual_name in ('ami', 'root'):
                continue

            assert block_device.is_swap_or_ephemeral(virtual_name)
            device_name = m['device']
            if device_name in [b['device_name'] for b in mapping
                               if not b.get('no_device', False)]:
                continue

            # NOTE(yamahata): swap and ephemeral devices are specified in
            #                 the AMI, but disabled for this instance by the
            #                 user. So disable those devices via no_device.
            mapping.append({'device_name': device_name, 'no_device': True})

        if mapping:
            properties['block_device_mapping'] = mapping

        for attr in ('status', 'location', 'id'):
            src_image.pop(attr, None)

        image_id = self._register_image(context, src_image)

        if restart_instance:
            self.compute_api.start(context, instance_id=instance_id)

        return {'imageId': image_id}
./CrossVul/dataset_final_sorted/CWE-264/py/bad_3634_0
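A minimal standalone sketch of the reservation-grouping step performed at the end of _format_instances in the cloud.py excerpt above: formatted instance dicts are bucketed under their reservation id so that each reservation is reported once, carrying an instancesSet list. The function name and the assumption that each formatted dict still carries its reservation_id are illustrative, not part of Nova.

def group_by_reservation(formatted_instances):
    """Group pre-formatted EC2 instance dicts by reservation id."""
    reservations = {}
    for inst in formatted_instances:
        res_id = inst['reservation_id']
        if res_id not in reservations:
            # first instance seen for this reservation: create the envelope
            reservations[res_id] = {'reservationId': res_id,
                                    'instancesSet': []}
        reservations[res_id]['instancesSet'].append(inst)
    return list(reservations.values())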
crossvul-python_data_good_2014_1
# -*- coding: utf-8 -*- """ jinja2.bccache ~~~~~~~~~~~~~~ This module implements the bytecode cache system Jinja is optionally using. This is useful if you have very complex template situations and the compiliation of all those templates slow down your application too much. Situations where this is useful are often forking web applications that are initialized on the first request. :copyright: (c) 2010 by the Jinja Team. :license: BSD. """ from os import path, listdir import os import sys import errno import marshal import tempfile import fnmatch from hashlib import sha1 from jinja2.utils import open_if_exists from jinja2._compat import BytesIO, pickle, PY2, text_type # marshal works better on 3.x, one hack less required if not PY2: marshal_dump = marshal.dump marshal_load = marshal.load else: def marshal_dump(code, f): if isinstance(f, file): marshal.dump(code, f) else: f.write(marshal.dumps(code)) def marshal_load(f): if isinstance(f, file): return marshal.load(f) return marshal.loads(f.read()) bc_version = 2 # magic version used to only change with new jinja versions. With 2.6 # we change this to also take Python version changes into account. The # reason for this is that Python tends to segfault if fed earlier bytecode # versions because someone thought it would be a good idea to reuse opcodes # or make Python incompatible with earlier versions. bc_magic = 'j2'.encode('ascii') + \ pickle.dumps(bc_version, 2) + \ pickle.dumps((sys.version_info[0] << 24) | sys.version_info[1]) class Bucket(object): """Buckets are used to store the bytecode for one template. It's created and initialized by the bytecode cache and passed to the loading functions. The buckets get an internal checksum from the cache assigned and use this to automatically reject outdated cache material. Individual bytecode cache subclasses don't have to care about cache invalidation. """ def __init__(self, environment, key, checksum): self.environment = environment self.key = key self.checksum = checksum self.reset() def reset(self): """Resets the bucket (unloads the bytecode).""" self.code = None def load_bytecode(self, f): """Loads bytecode from a file or file like object.""" # make sure the magic header is correct magic = f.read(len(bc_magic)) if magic != bc_magic: self.reset() return # the source code of the file changed, we need to reload checksum = pickle.load(f) if self.checksum != checksum: self.reset() return self.code = marshal_load(f) def write_bytecode(self, f): """Dump the bytecode into the file or file like object passed.""" if self.code is None: raise TypeError('can\'t write empty bucket') f.write(bc_magic) pickle.dump(self.checksum, f, 2) marshal_dump(self.code, f) def bytecode_from_string(self, string): """Load bytecode from a string.""" self.load_bytecode(BytesIO(string)) def bytecode_to_string(self): """Return the bytecode as string.""" out = BytesIO() self.write_bytecode(out) return out.getvalue() class BytecodeCache(object): """To implement your own bytecode cache you have to subclass this class and override :meth:`load_bytecode` and :meth:`dump_bytecode`. Both of these methods are passed a :class:`~jinja2.bccache.Bucket`. 

    A very basic bytecode cache that saves the bytecode on the file
    system::

        from os import path

        class MyCache(BytecodeCache):

            def __init__(self, directory):
                self.directory = directory

            def load_bytecode(self, bucket):
                filename = path.join(self.directory, bucket.key)
                if path.exists(filename):
                    with open(filename, 'rb') as f:
                        bucket.load_bytecode(f)

            def dump_bytecode(self, bucket):
                filename = path.join(self.directory, bucket.key)
                with open(filename, 'wb') as f:
                    bucket.write_bytecode(f)

    A more advanced version of a filesystem-based bytecode cache is part
    of Jinja2.
    """

    def load_bytecode(self, bucket):
        """Subclasses have to override this method to load bytecode into a
        bucket.  If they are not able to find code in the cache for the
        bucket, it must not do anything.
        """
        raise NotImplementedError()

    def dump_bytecode(self, bucket):
        """Subclasses have to override this method to write the bytecode
        from a bucket back to the cache.  If it is unable to do so it must
        not fail silently but raise an exception.
        """
        raise NotImplementedError()

    def clear(self):
        """Clears the cache.  This method is not used by Jinja2 but should
        be implemented to allow applications to clear the bytecode cache
        used by a particular environment.
        """

    def get_cache_key(self, name, filename=None):
        """Returns the unique hash key for this template name."""
        hash = sha1(name.encode('utf-8'))
        if filename is not None:
            filename = '|' + filename
            if isinstance(filename, text_type):
                filename = filename.encode('utf-8')
            hash.update(filename)
        return hash.hexdigest()

    def get_source_checksum(self, source):
        """Returns a checksum for the source."""
        return sha1(source.encode('utf-8')).hexdigest()

    def get_bucket(self, environment, name, filename, source):
        """Return a cache bucket for the given template.  All arguments are
        mandatory but filename may be `None`.
        """
        key = self.get_cache_key(name, filename)
        checksum = self.get_source_checksum(source)
        bucket = Bucket(environment, key, checksum)
        self.load_bytecode(bucket)
        return bucket

    def set_bucket(self, bucket):
        """Put the bucket into the cache."""
        self.dump_bytecode(bucket)


class FileSystemBytecodeCache(BytecodeCache):
    """A bytecode cache that stores bytecode on the filesystem.  It accepts
    two arguments: The directory where the cache items are stored and a
    pattern string that is used to build the filename.

    If no directory is specified a default cache directory is selected.  On
    Windows the user's temp directory is used, on UNIX systems a directory
    is created for the user in the system temp directory.

    The pattern can be used to have multiple separate caches operate on the
    same directory.  The default pattern is ``'__jinja2_%s.cache'``.  ``%s``
    is replaced with the cache key.

    >>> bcc = FileSystemBytecodeCache('/tmp/jinja_cache', '%s.cache')

    This bytecode cache supports clearing of the cache using the clear
    method.
    """

    def __init__(self, directory=None, pattern='__jinja2_%s.cache'):
        if directory is None:
            directory = self._get_default_cache_dir()
        self.directory = directory
        self.pattern = pattern

    def _get_default_cache_dir(self):
        tmpdir = tempfile.gettempdir()

        # On Windows the temporary directory is already user-specific
        # unless explicitly forced otherwise.  We can just use that.
        if os.name == 'nt':
            return tmpdir
        if not hasattr(os, 'getuid'):
            raise RuntimeError('Cannot determine safe temp directory.  You '
                               'need to explicitly provide one.')

        dirname = '_jinja2-cache-%d' % os.getuid()
        actual_dir = os.path.join(tmpdir, dirname)
        try:
            os.mkdir(actual_dir, 0o700)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
        return actual_dir

    def _get_cache_filename(self, bucket):
        return path.join(self.directory, self.pattern % bucket.key)

    def load_bytecode(self, bucket):
        f = open_if_exists(self._get_cache_filename(bucket), 'rb')
        if f is not None:
            try:
                bucket.load_bytecode(f)
            finally:
                f.close()

    def dump_bytecode(self, bucket):
        f = open(self._get_cache_filename(bucket), 'wb')
        try:
            bucket.write_bytecode(f)
        finally:
            f.close()

    def clear(self):
        # imported lazily here because google app-engine doesn't support
        # write access on the file system and the function does not exist
        # normally.
        from os import remove
        files = fnmatch.filter(listdir(self.directory), self.pattern % '*')
        for filename in files:
            try:
                remove(path.join(self.directory, filename))
            except OSError:
                pass


class MemcachedBytecodeCache(BytecodeCache):
    """This class implements a bytecode cache that uses a memcache cache for
    storing the information.  It does not enforce a specific memcache library
    (tummy's memcache or cmemcache) but will accept any class that provides
    the minimal interface required.

    Libraries compatible with this class:

    -   `werkzeug <http://werkzeug.pocoo.org/>`_.contrib.cache
    -   `python-memcached <http://www.tummy.com/Community/software/python-memcached/>`_
    -   `cmemcache <http://gijsbert.org/cmemcache/>`_

    (Unfortunately the django cache interface is not compatible because it
    does not support storing binary data, only unicode.  You can however
    pass the underlying cache client to the bytecode cache which is
    available as `django.core.cache.cache._client`.)

    The minimal interface for the client passed to the constructor is this:

    .. class:: MinimalClientInterface

        .. method:: set(key, value[, timeout])

            Stores the bytecode in the cache.  `value` is a string and
            `timeout` the timeout of the key.  If timeout is not provided
            a default timeout or no timeout should be assumed, if it's
            provided it's an integer with the number of seconds the cache
            item should exist.

        .. method:: get(key)

            Returns the value for the cache key.  If the item does not
            exist in the cache the return value must be `None`.

    The other arguments to the constructor are the prefix for all keys that
    is added before the actual cache key and the timeout for the bytecode in
    the cache system.  We recommend a high (or no) timeout.

    This bytecode cache does not support clearing of used items in the
    cache.  The clear method is a no-operation function.

    .. versionadded:: 2.7
       Added support for ignoring memcache errors through the
       `ignore_memcache_errors` parameter.
    """

    def __init__(self, client, prefix='jinja2/bytecode/', timeout=None,
                 ignore_memcache_errors=True):
        self.client = client
        self.prefix = prefix
        self.timeout = timeout
        self.ignore_memcache_errors = ignore_memcache_errors

    def load_bytecode(self, bucket):
        try:
            code = self.client.get(self.prefix + bucket.key)
        except Exception:
            if not self.ignore_memcache_errors:
                raise
            code = None
        if code is not None:
            bucket.bytecode_from_string(code)

    def dump_bytecode(self, bucket):
        args = (self.prefix + bucket.key, bucket.bytecode_to_string())
        if self.timeout is not None:
            args += (self.timeout,)
        try:
            self.client.set(*args)
        except Exception:
            if not self.ignore_memcache_errors:
                raise
./CrossVul/dataset_final_sorted/CWE-264/py/good_2014_1
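To complement the MinimalClientInterface documented on MemcachedBytecodeCache above, here is a toy in-memory client that satisfies that interface (set/get, with get returning None on a miss). It is a sketch for local experimentation, not part of Jinja2, and the class name is invented.

class DictClient(object):
    """In-memory stand-in for a memcache client (illustrative only)."""

    def __init__(self):
        self._store = {}

    def set(self, key, value, timeout=None):
        # the optional timeout is accepted but ignored in this toy client
        self._store[key] = value

    def get(self, key):
        # must return None for missing keys, per MinimalClientInterface
        return self._store.get(key)

# hypothetical wiring into an environment (jinja2.Environment accepts a
# bytecode_cache keyword):
#   env = Environment(bytecode_cache=MemcachedBytecodeCache(DictClient()))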
crossvul-python_data_bad_3632_3
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of SQLAlchemy backend.""" import datetime import functools import re import warnings from nova import block_device from nova import db from nova import exception from nova import flags from nova import utils from nova import log as logging from nova.compute import aggregate_states from nova.compute import vm_states from nova.db.sqlalchemy import models from nova.db.sqlalchemy.session import get_session from sqlalchemy import and_ from sqlalchemy import or_ from sqlalchemy.exc import IntegrityError from sqlalchemy.orm import joinedload from sqlalchemy.orm import joinedload_all from sqlalchemy.sql import func from sqlalchemy.sql.expression import asc from sqlalchemy.sql.expression import desc from sqlalchemy.sql.expression import literal_column FLAGS = flags.FLAGS flags.DECLARE('reserved_host_disk_mb', 'nova.scheduler.host_manager') flags.DECLARE('reserved_host_memory_mb', 'nova.scheduler.host_manager') LOG = logging.getLogger(__name__) def is_admin_context(context): """Indicates if the request context is an administrator.""" if not context: warnings.warn(_('Use of empty request context is deprecated'), DeprecationWarning) raise Exception('die') return context.is_admin def is_user_context(context): """Indicates if the request context is a normal user.""" if not context: return False if context.is_admin: return False if not context.user_id or not context.project_id: return False return True def authorize_project_context(context, project_id): """Ensures a request has permission to access the given project.""" if is_user_context(context): if not context.project_id: raise exception.NotAuthorized() elif context.project_id != project_id: raise exception.NotAuthorized() def authorize_user_context(context, user_id): """Ensures a request has permission to access the given user.""" if is_user_context(context): if not context.user_id: raise exception.NotAuthorized() elif context.user_id != user_id: raise exception.NotAuthorized() def authorize_quota_class_context(context, class_name): """Ensures a request has permission to access the given quota class.""" if is_user_context(context): if not context.quota_class: raise exception.NotAuthorized() elif context.quota_class != class_name: raise exception.NotAuthorized() def require_admin_context(f): """Decorator to require admin request context. The first argument to the wrapped function must be the context. """ def wrapper(*args, **kwargs): if not is_admin_context(args[0]): raise exception.AdminRequired() return f(*args, **kwargs) return wrapper def require_context(f): """Decorator to require *any* user or admin context. 
This does no authorization for user or project access matching, see :py:func:`authorize_project_context` and :py:func:`authorize_user_context`. The first argument to the wrapped function must be the context. """ def wrapper(*args, **kwargs): if not is_admin_context(args[0]) and not is_user_context(args[0]): raise exception.NotAuthorized() return f(*args, **kwargs) return wrapper def require_instance_exists(f): """Decorator to require the specified instance to exist. Requires the wrapped function to use context and instance_id as their first two arguments. """ def wrapper(context, instance_id, *args, **kwargs): db.instance_get(context, instance_id) return f(context, instance_id, *args, **kwargs) wrapper.__name__ = f.__name__ return wrapper def require_volume_exists(f): """Decorator to require the specified volume to exist. Requires the wrapped function to use context and volume_id as their first two arguments. """ def wrapper(context, volume_id, *args, **kwargs): db.volume_get(context, volume_id) return f(context, volume_id, *args, **kwargs) wrapper.__name__ = f.__name__ return wrapper def require_aggregate_exists(f): """Decorator to require the specified aggregate to exist. Requires the wrapped function to use context and aggregate_id as their first two arguments. """ @functools.wraps(f) def wrapper(context, aggregate_id, *args, **kwargs): db.aggregate_get(context, aggregate_id) return f(context, aggregate_id, *args, **kwargs) return wrapper def model_query(context, *args, **kwargs): """Query helper that accounts for context's `read_deleted` field. :param context: context to query under :param session: if present, the session to use :param read_deleted: if present, overrides context's read_deleted field. :param project_only: if present and context is user-type, then restrict query to match the context's project_id. """ session = kwargs.get('session') or get_session() read_deleted = kwargs.get('read_deleted') or context.read_deleted project_only = kwargs.get('project_only') query = session.query(*args) if read_deleted == 'no': query = query.filter_by(deleted=False) elif read_deleted == 'yes': pass # omit the filter to include deleted and active elif read_deleted == 'only': query = query.filter_by(deleted=True) else: raise Exception( _("Unrecognized read_deleted value '%s'") % read_deleted) if project_only and is_user_context(context): query = query.filter_by(project_id=context.project_id) return query def exact_filter(query, model, filters, legal_keys): """Applies exact match filtering to a query. Returns the updated query. Modifies filters argument to remove filters consumed. :param query: query to apply filters to :param model: model object the query applies to, for IN-style filtering :param filters: dictionary of filters; values that are lists, tuples, sets, or frozensets cause an 'IN' test to be performed, while exact matching ('==' operator) is used for other values :param legal_keys: list of keys to apply exact filtering to """ filter_dict = {} # Walk through all the keys for key in legal_keys: # Skip ones we're not filtering on if key not in filters: continue # OK, filtering on this key; what value do we search for? 
value = filters.pop(key) if isinstance(value, (list, tuple, set, frozenset)): # Looking for values in a list; apply to query directly column_attr = getattr(model, key) query = query.filter(column_attr.in_(value)) else: # OK, simple exact match; save for later filter_dict[key] = value # Apply simple exact matches if filter_dict: query = query.filter_by(**filter_dict) return query ################### @require_admin_context def service_destroy(context, service_id): session = get_session() with session.begin(): service_ref = service_get(context, service_id, session=session) service_ref.delete(session=session) if service_ref.topic == 'compute' and service_ref.compute_node: for c in service_ref.compute_node: c.delete(session=session) @require_admin_context def service_get(context, service_id, session=None): result = model_query(context, models.Service, session=session).\ options(joinedload('compute_node')).\ filter_by(id=service_id).\ first() if not result: raise exception.ServiceNotFound(service_id=service_id) return result @require_admin_context def service_get_all(context, disabled=None): query = model_query(context, models.Service) if disabled is not None: query = query.filter_by(disabled=disabled) return query.all() @require_admin_context def service_get_all_by_topic(context, topic): return model_query(context, models.Service, read_deleted="no").\ filter_by(disabled=False).\ filter_by(topic=topic).\ all() @require_admin_context def service_get_by_host_and_topic(context, host, topic): return model_query(context, models.Service, read_deleted="no").\ filter_by(disabled=False).\ filter_by(host=host).\ filter_by(topic=topic).\ first() @require_admin_context def service_get_all_by_host(context, host): return model_query(context, models.Service, read_deleted="no").\ filter_by(host=host).\ all() @require_admin_context def service_get_all_compute_by_host(context, host): result = model_query(context, models.Service, read_deleted="no").\ options(joinedload('compute_node')).\ filter_by(host=host).\ filter_by(topic="compute").\ all() if not result: raise exception.ComputeHostNotFound(host=host) return result @require_admin_context def _service_get_all_topic_subquery(context, session, topic, subq, label): sort_value = getattr(subq.c, label) return model_query(context, models.Service, func.coalesce(sort_value, 0), session=session, read_deleted="no").\ filter_by(topic=topic).\ filter_by(disabled=False).\ outerjoin((subq, models.Service.host == subq.c.host)).\ order_by(sort_value).\ all() @require_admin_context def service_get_all_compute_sorted(context): session = get_session() with session.begin(): # NOTE(vish): The intended query is below # SELECT services.*, COALESCE(inst_cores.instance_cores, # 0) # FROM services LEFT OUTER JOIN # (SELECT host, SUM(instances.vcpus) AS instance_cores # FROM instances GROUP BY host) AS inst_cores # ON services.host = inst_cores.host topic = 'compute' label = 'instance_cores' subq = model_query(context, models.Instance.host, func.sum(models.Instance.vcpus).label(label), session=session, read_deleted="no").\ group_by(models.Instance.host).\ subquery() return _service_get_all_topic_subquery(context, session, topic, subq, label) @require_admin_context def service_get_all_volume_sorted(context): session = get_session() with session.begin(): topic = 'volume' label = 'volume_gigabytes' subq = model_query(context, models.Volume.host, func.sum(models.Volume.size).label(label), session=session, read_deleted="no").\ group_by(models.Volume.host).\ subquery() return 
_service_get_all_topic_subquery(context, session, topic, subq, label) @require_admin_context def service_get_by_args(context, host, binary): result = model_query(context, models.Service).\ filter_by(host=host).\ filter_by(binary=binary).\ first() if not result: raise exception.HostBinaryNotFound(host=host, binary=binary) return result @require_admin_context def service_create(context, values): service_ref = models.Service() service_ref.update(values) if not FLAGS.enable_new_services: service_ref.disabled = True service_ref.save() return service_ref @require_admin_context def service_update(context, service_id, values): session = get_session() with session.begin(): service_ref = service_get(context, service_id, session=session) service_ref.update(values) service_ref.save(session=session) ################### @require_admin_context def compute_node_get(context, compute_id, session=None): result = model_query(context, models.ComputeNode, session=session).\ filter_by(id=compute_id).\ first() if not result: raise exception.ComputeHostNotFound(host=compute_id) return result @require_admin_context def compute_node_get_all(context, session=None): return model_query(context, models.ComputeNode, session=session).\ options(joinedload('service')).\ all() def _get_host_utilization(context, host, ram_mb, disk_gb): """Compute the current utilization of a given host.""" instances = instance_get_all_by_host(context, host) vms = len(instances) free_ram_mb = ram_mb - FLAGS.reserved_host_memory_mb free_disk_gb = disk_gb - (FLAGS.reserved_host_disk_mb * 1024) work = 0 for instance in instances: free_ram_mb -= instance.memory_mb free_disk_gb -= instance.root_gb free_disk_gb -= instance.ephemeral_gb if instance.vm_state in [vm_states.BUILDING, vm_states.REBUILDING, vm_states.MIGRATING, vm_states.RESIZING]: work += 1 return dict(free_ram_mb=free_ram_mb, free_disk_gb=free_disk_gb, current_workload=work, running_vms=vms) def _adjust_compute_node_values_for_utilization(context, values, session): service_ref = service_get(context, values['service_id'], session=session) host = service_ref['host'] ram_mb = values['memory_mb'] disk_gb = values['local_gb'] values.update(_get_host_utilization(context, host, ram_mb, disk_gb)) @require_admin_context def compute_node_create(context, values, session=None): """Creates a new ComputeNode and populates the capacity fields with the most recent data.""" if not session: session = get_session() _adjust_compute_node_values_for_utilization(context, values, session) with session.begin(subtransactions=True): compute_node_ref = models.ComputeNode() session.add(compute_node_ref) compute_node_ref.update(values) return compute_node_ref @require_admin_context def compute_node_update(context, compute_id, values, auto_adjust): """Creates a new ComputeNode and populates the capacity fields with the most recent data.""" session = get_session() if auto_adjust: _adjust_compute_node_values_for_utilization(context, values, session) with session.begin(subtransactions=True): compute_ref = compute_node_get(context, compute_id, session=session) compute_ref.update(values) compute_ref.save(session=session) def compute_node_get_by_host(context, host): """Get all capacity entries for the given host.""" session = get_session() with session.begin(): node = session.query(models.ComputeNode).\ options(joinedload('service')).\ filter(models.Service.host == host).\ filter_by(deleted=False) return node.first() def compute_node_utilization_update(context, host, free_ram_mb_delta=0, free_disk_gb_delta=0, work_delta=0, 
vm_delta=0): """Update a specific ComputeNode entry by a series of deltas. Do this as a single atomic action and lock the row for the duration of the operation. Requires that ComputeNode record exist.""" session = get_session() compute_node = None with session.begin(subtransactions=True): compute_node = session.query(models.ComputeNode).\ options(joinedload('service')).\ filter(models.Service.host == host).\ filter_by(deleted=False).\ with_lockmode('update').\ first() if compute_node is None: raise exception.NotFound(_("No ComputeNode for %(host)s") % locals()) # This table thingy is how we get atomic UPDATE x = x + 1 # semantics. table = models.ComputeNode.__table__ if free_ram_mb_delta != 0: compute_node.free_ram_mb = table.c.free_ram_mb + free_ram_mb_delta if free_disk_gb_delta != 0: compute_node.free_disk_gb = (table.c.free_disk_gb + free_disk_gb_delta) if work_delta != 0: compute_node.current_workload = (table.c.current_workload + work_delta) if vm_delta != 0: compute_node.running_vms = table.c.running_vms + vm_delta return compute_node def compute_node_utilization_set(context, host, free_ram_mb=None, free_disk_gb=None, work=None, vms=None): """Like compute_node_utilization_update() modify a specific host entry. But this function will set the metrics absolutely (vs. a delta update). """ session = get_session() compute_node = None with session.begin(subtransactions=True): compute_node = session.query(models.ComputeNode).\ options(joinedload('service')).\ filter(models.Service.host == host).\ filter_by(deleted=False).\ with_lockmode('update').\ first() if compute_node is None: raise exception.NotFound(_("No ComputeNode for %(host)s") % locals()) if free_ram_mb != None: compute_node.free_ram_mb = free_ram_mb if free_disk_gb != None: compute_node.free_disk_gb = free_disk_gb if work != None: compute_node.current_workload = work if vms != None: compute_node.running_vms = vms return compute_node ################### @require_admin_context def certificate_get(context, certificate_id, session=None): result = model_query(context, models.Certificate, session=session).\ filter_by(id=certificate_id).\ first() if not result: raise exception.CertificateNotFound(certificate_id=certificate_id) return result @require_admin_context def certificate_create(context, values): certificate_ref = models.Certificate() for (key, value) in values.iteritems(): certificate_ref[key] = value certificate_ref.save() return certificate_ref @require_admin_context def certificate_get_all_by_project(context, project_id): return model_query(context, models.Certificate, read_deleted="no").\ filter_by(project_id=project_id).\ all() @require_admin_context def certificate_get_all_by_user(context, user_id): return model_query(context, models.Certificate, read_deleted="no").\ filter_by(user_id=user_id).\ all() @require_admin_context def certificate_get_all_by_user_and_project(context, user_id, project_id): return model_query(context, models.Certificate, read_deleted="no").\ filter_by(user_id=user_id).\ filter_by(project_id=project_id).\ all() ################### @require_context def floating_ip_get(context, id): result = model_query(context, models.FloatingIp, project_only=True).\ filter_by(id=id).\ first() if not result: raise exception.FloatingIpNotFound(id=id) return result @require_context def floating_ip_get_pools(context): session = get_session() pools = [] for result in session.query(models.FloatingIp.pool).distinct(): pools.append({'name': result[0]}) return pools @require_context def floating_ip_allocate_address(context, 
project_id, pool): authorize_project_context(context, project_id) session = get_session() with session.begin(): floating_ip_ref = model_query(context, models.FloatingIp, session=session, read_deleted="no").\ filter_by(fixed_ip_id=None).\ filter_by(project_id=None).\ filter_by(pool=pool).\ with_lockmode('update').\ first() # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues if not floating_ip_ref: raise exception.NoMoreFloatingIps() floating_ip_ref['project_id'] = project_id session.add(floating_ip_ref) return floating_ip_ref['address'] @require_context def floating_ip_create(context, values): floating_ip_ref = models.FloatingIp() floating_ip_ref.update(values) floating_ip_ref.save() return floating_ip_ref['address'] @require_context def floating_ip_count_by_project(context, project_id): authorize_project_context(context, project_id) # TODO(tr3buchet): why leave auto_assigned floating IPs out? return model_query(context, models.FloatingIp, read_deleted="no").\ filter_by(project_id=project_id).\ filter_by(auto_assigned=False).\ count() @require_context def floating_ip_fixed_ip_associate(context, floating_address, fixed_address, host): session = get_session() with session.begin(): floating_ip_ref = floating_ip_get_by_address(context, floating_address, session=session) fixed_ip_ref = fixed_ip_get_by_address(context, fixed_address, session=session) floating_ip_ref.fixed_ip_id = fixed_ip_ref["id"] floating_ip_ref.host = host floating_ip_ref.save(session=session) @require_context def floating_ip_deallocate(context, address): session = get_session() with session.begin(): floating_ip_ref = floating_ip_get_by_address(context, address, session=session) floating_ip_ref['project_id'] = None floating_ip_ref['host'] = None floating_ip_ref['auto_assigned'] = False floating_ip_ref.save(session=session) @require_context def floating_ip_destroy(context, address): session = get_session() with session.begin(): floating_ip_ref = floating_ip_get_by_address(context, address, session=session) floating_ip_ref.delete(session=session) @require_context def floating_ip_disassociate(context, address): session = get_session() with session.begin(): floating_ip_ref = floating_ip_get_by_address(context, address, session=session) fixed_ip_ref = fixed_ip_get(context, floating_ip_ref['fixed_ip_id']) if fixed_ip_ref: fixed_ip_address = fixed_ip_ref['address'] else: fixed_ip_address = None floating_ip_ref.fixed_ip_id = None floating_ip_ref.host = None floating_ip_ref.save(session=session) return fixed_ip_address @require_context def floating_ip_set_auto_assigned(context, address): session = get_session() with session.begin(): floating_ip_ref = floating_ip_get_by_address(context, address, session=session) floating_ip_ref.auto_assigned = True floating_ip_ref.save(session=session) def _floating_ip_get_all(context): return model_query(context, models.FloatingIp, read_deleted="no") @require_admin_context def floating_ip_get_all(context): floating_ip_refs = _floating_ip_get_all(context).all() if not floating_ip_refs: raise exception.NoFloatingIpsDefined() return floating_ip_refs @require_admin_context def floating_ip_get_all_by_host(context, host): floating_ip_refs = _floating_ip_get_all(context).\ filter_by(host=host).\ all() if not floating_ip_refs: raise exception.FloatingIpNotFoundForHost(host=host) return floating_ip_refs @require_context def floating_ip_get_all_by_project(context, project_id): authorize_project_context(context, project_id) # TODO(tr3buchet): why do we not want 
auto_assigned floating IPs here? return _floating_ip_get_all(context).\ filter_by(project_id=project_id).\ filter_by(auto_assigned=False).\ all() @require_context def floating_ip_get_by_address(context, address, session=None): result = model_query(context, models.FloatingIp, session=session).\ filter_by(address=address).\ first() if not result: raise exception.FloatingIpNotFoundForAddress(address=address) # If the floating IP has a project ID set, check to make sure # the non-admin user has access. if result.project_id and is_user_context(context): authorize_project_context(context, result.project_id) return result @require_context def floating_ip_get_by_fixed_address(context, fixed_address, session=None): if not session: session = get_session() fixed_ip = fixed_ip_get_by_address(context, fixed_address, session) fixed_ip_id = fixed_ip['id'] return model_query(context, models.FloatingIp, session=session).\ filter_by(fixed_ip_id=fixed_ip_id).\ all() # NOTE(tr3buchet) please don't invent an exception here, empty list is fine @require_context def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id, session=None): if not session: session = get_session() return model_query(context, models.FloatingIp, session=session).\ filter_by(fixed_ip_id=fixed_ip_id).\ all() @require_context def floating_ip_update(context, address, values): session = get_session() with session.begin(): floating_ip_ref = floating_ip_get_by_address(context, address, session) for (key, value) in values.iteritems(): floating_ip_ref[key] = value floating_ip_ref.save(session=session) @require_context def _dnsdomain_get(context, session, fqdomain): return model_query(context, models.DNSDomain, session=session, read_deleted="no").\ filter_by(domain=fqdomain).\ with_lockmode('update').\ first() @require_context def dnsdomain_get(context, fqdomain): session = get_session() with session.begin(): return _dnsdomain_get(context, session, fqdomain) @require_admin_context def _dnsdomain_get_or_create(context, session, fqdomain): domain_ref = _dnsdomain_get(context, session, fqdomain) if not domain_ref: dns_ref = models.DNSDomain() dns_ref.update({'domain': fqdomain, 'availability_zone': None, 'project_id': None}) return dns_ref return domain_ref @require_admin_context def dnsdomain_register_for_zone(context, fqdomain, zone): session = get_session() with session.begin(): domain_ref = _dnsdomain_get_or_create(context, session, fqdomain) domain_ref.scope = 'private' domain_ref.availability_zone = zone domain_ref.save(session=session) @require_admin_context def dnsdomain_register_for_project(context, fqdomain, project): session = get_session() with session.begin(): domain_ref = _dnsdomain_get_or_create(context, session, fqdomain) domain_ref.scope = 'public' domain_ref.project_id = project domain_ref.save(session=session) @require_admin_context def dnsdomain_unregister(context, fqdomain): session = get_session() with session.begin(): session.query(models.DNSDomain).\ filter_by(domain=fqdomain).\ delete() @require_context def dnsdomain_list(context): session = get_session() records = model_query(context, models.DNSDomain, session=session, read_deleted="no").\ with_lockmode('update').all() domains = [] for record in records: domains.append(record.domain) return domains ################### @require_admin_context def fixed_ip_associate(context, address, instance_id, network_id=None, reserved=False): """Keyword arguments: reserved -- should be a boolean value(True or False), exact value will be used to filter on the fixed ip address """ session = 
get_session() with session.begin(): network_or_none = or_(models.FixedIp.network_id == network_id, models.FixedIp.network_id == None) fixed_ip_ref = model_query(context, models.FixedIp, session=session, read_deleted="no").\ filter(network_or_none).\ filter_by(reserved=reserved).\ filter_by(address=address).\ with_lockmode('update').\ first() # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues if fixed_ip_ref is None: raise exception.FixedIpNotFoundForNetwork(address=address, network_id=network_id) if fixed_ip_ref.instance_id: raise exception.FixedIpAlreadyInUse(address=address) if not fixed_ip_ref.network_id: fixed_ip_ref.network_id = network_id fixed_ip_ref.instance_id = instance_id session.add(fixed_ip_ref) return fixed_ip_ref['address'] @require_admin_context def fixed_ip_associate_pool(context, network_id, instance_id=None, host=None): session = get_session() with session.begin(): network_or_none = or_(models.FixedIp.network_id == network_id, models.FixedIp.network_id == None) fixed_ip_ref = model_query(context, models.FixedIp, session=session, read_deleted="no").\ filter(network_or_none).\ filter_by(reserved=False).\ filter_by(instance_id=None).\ filter_by(host=None).\ with_lockmode('update').\ first() # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues if not fixed_ip_ref: raise exception.NoMoreFixedIps() if fixed_ip_ref['network_id'] is None: fixed_ip_ref['network'] = network_id if instance_id: fixed_ip_ref['instance_id'] = instance_id if host: fixed_ip_ref['host'] = host session.add(fixed_ip_ref) return fixed_ip_ref['address'] @require_context def fixed_ip_create(context, values): fixed_ip_ref = models.FixedIp() fixed_ip_ref.update(values) fixed_ip_ref.save() return fixed_ip_ref['address'] @require_context def fixed_ip_bulk_create(context, ips): session = get_session() with session.begin(): for ip in ips: model = models.FixedIp() model.update(ip) session.add(model) @require_context def fixed_ip_disassociate(context, address): session = get_session() with session.begin(): fixed_ip_ref = fixed_ip_get_by_address(context, address, session=session) fixed_ip_ref['instance_id'] = None fixed_ip_ref.save(session=session) @require_admin_context def fixed_ip_disassociate_all_by_timeout(context, host, time): session = get_session() # NOTE(vish): only update fixed ips that "belong" to this # host; i.e. the network host or the instance # host matches. Two queries necessary because # join with update doesn't work. 
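    # Step 1 below collects only the ids of the stale fixed IPs via the
    # joins; step 2 then applies the bulk UPDATE to exactly those ids.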
host_filter = or_(and_(models.Instance.host == host, models.Network.multi_host == True), models.Network.host == host) result = session.query(models.FixedIp.id).\ filter(models.FixedIp.deleted == False).\ filter(models.FixedIp.allocated == False).\ filter(models.FixedIp.updated_at < time).\ join((models.Network, models.Network.id == models.FixedIp.network_id)).\ join((models.Instance, models.Instance.id == models.FixedIp.instance_id)).\ filter(host_filter).\ all() fixed_ip_ids = [fip[0] for fip in result] if not fixed_ip_ids: return 0 result = model_query(context, models.FixedIp, session=session).\ filter(models.FixedIp.id.in_(fixed_ip_ids)).\ update({'instance_id': None, 'leased': False, 'updated_at': utils.utcnow()}, synchronize_session='fetch') return result @require_context def fixed_ip_get(context, id, session=None): result = model_query(context, models.FixedIp, session=session).\ filter_by(id=id).\ first() if not result: raise exception.FixedIpNotFound(id=id) # FIXME(sirp): shouldn't we just use project_only here to restrict the # results? if is_user_context(context) and result['instance_id'] is not None: instance = instance_get(context, result['instance_id'], session) authorize_project_context(context, instance.project_id) return result @require_admin_context def fixed_ip_get_all(context, session=None): result = model_query(context, models.FixedIp, session=session, read_deleted="yes").\ all() if not result: raise exception.NoFixedIpsDefined() return result @require_context def fixed_ip_get_by_address(context, address, session=None): result = model_query(context, models.FixedIp, session=session, read_deleted="yes").\ filter_by(address=address).\ first() if not result: raise exception.FixedIpNotFoundForAddress(address=address) # NOTE(sirp): shouldn't we just use project_only here to restrict the # results? if is_user_context(context) and result['instance_id'] is not None: instance = instance_get(context, result['instance_id'], session) authorize_project_context(context, instance.project_id) return result @require_context def fixed_ip_get_by_instance(context, instance_id): result = model_query(context, models.FixedIp, read_deleted="no").\ filter_by(instance_id=instance_id).\ all() if not result: raise exception.FixedIpNotFoundForInstance(instance_id=instance_id) return result @require_context def fixed_ip_get_by_network_host(context, network_id, host): result = model_query(context, models.FixedIp, read_deleted="no").\ filter_by(network_id=network_id).\ filter_by(host=host).\ first() if not result: raise exception.FixedIpNotFoundForNetworkHost(network_id=network_id, host=host) return result @require_context def fixed_ips_by_virtual_interface(context, vif_id): result = model_query(context, models.FixedIp, read_deleted="no").\ filter_by(virtual_interface_id=vif_id).\ all() return result @require_admin_context def fixed_ip_get_network(context, address): fixed_ip_ref = fixed_ip_get_by_address(context, address) return fixed_ip_ref.network @require_context def fixed_ip_update(context, address, values): session = get_session() with session.begin(): fixed_ip_ref = fixed_ip_get_by_address(context, address, session=session) fixed_ip_ref.update(values) fixed_ip_ref.save(session=session) ################### @require_context def virtual_interface_create(context, values): """Create a new virtual interface record in the database. 
:param values: = dict containing column values """ try: vif_ref = models.VirtualInterface() vif_ref.update(values) vif_ref.save() except IntegrityError: raise exception.VirtualInterfaceCreateException() return vif_ref @require_context def _virtual_interface_query(context, session=None): return model_query(context, models.VirtualInterface, session=session, read_deleted="yes") @require_context def virtual_interface_get(context, vif_id, session=None): """Gets a virtual interface from the table. :param vif_id: = id of the virtual interface """ vif_ref = _virtual_interface_query(context, session=session).\ filter_by(id=vif_id).\ first() return vif_ref @require_context def virtual_interface_get_by_address(context, address): """Gets a virtual interface from the table. :param address: = the address of the interface you're looking to get """ vif_ref = _virtual_interface_query(context).\ filter_by(address=address).\ first() return vif_ref @require_context def virtual_interface_get_by_uuid(context, vif_uuid): """Gets a virtual interface from the table. :param vif_uuid: the uuid of the interface you're looking to get """ vif_ref = _virtual_interface_query(context).\ filter_by(uuid=vif_uuid).\ first() return vif_ref @require_context @require_instance_exists def virtual_interface_get_by_instance(context, instance_id): """Gets all virtual interfaces for instance. :param instance_id: = id of the instance to retrieve vifs for """ vif_refs = _virtual_interface_query(context).\ filter_by(instance_id=instance_id).\ all() return vif_refs @require_context def virtual_interface_get_by_instance_and_network(context, instance_id, network_id): """Gets virtual interface for instance that's associated with network.""" vif_ref = _virtual_interface_query(context).\ filter_by(instance_id=instance_id).\ filter_by(network_id=network_id).\ first() return vif_ref @require_context def virtual_interface_delete(context, vif_id): """Delete virtual interface record from the database. :param vif_id: = id of vif to delete """ session = get_session() vif_ref = virtual_interface_get(context, vif_id, session) with session.begin(): session.delete(vif_ref) @require_context def virtual_interface_delete_by_instance(context, instance_id): """Delete virtual interface records that are associated with the instance given by instance_id. :param instance_id: = id of instance """ vif_refs = virtual_interface_get_by_instance(context, instance_id) for vif_ref in vif_refs: virtual_interface_delete(context, vif_ref['id']) @require_context def virtual_interface_get_all(context): """Get all vifs""" vif_refs = _virtual_interface_query(context).all() return vif_refs ################### def _metadata_refs(metadata_dict, meta_class): metadata_refs = [] if metadata_dict: for k, v in metadata_dict.iteritems(): metadata_ref = meta_class() metadata_ref['key'] = k metadata_ref['value'] = v metadata_refs.append(metadata_ref) return metadata_refs @require_context def instance_create(context, values): """Create a new Instance record in the database. context - request context object values - dict containing column values. 
""" values = values.copy() values['metadata'] = _metadata_refs(values.get('metadata'), models.InstanceMetadata) instance_ref = models.Instance() if not values.get('uuid'): values['uuid'] = str(utils.gen_uuid()) instance_ref.update(values) session = get_session() with session.begin(): instance_ref.save(session=session) # and creat the info_cache table entry for instance instance_info_cache_create(context, {'instance_id': instance_ref['uuid']}) return instance_ref @require_admin_context def instance_data_get_for_project(context, project_id): result = model_query(context, func.count(models.Instance.id), func.sum(models.Instance.vcpus), func.sum(models.Instance.memory_mb), read_deleted="no").\ filter_by(project_id=project_id).\ first() # NOTE(vish): convert None to 0 return (result[0] or 0, result[1] or 0, result[2] or 0) @require_context def instance_destroy(context, instance_id): session = get_session() with session.begin(): if utils.is_uuid_like(instance_id): instance_ref = instance_get_by_uuid(context, instance_id, session=session) instance_id = instance_ref['id'] else: instance_ref = instance_get(context, instance_id, session=session) session.query(models.Instance).\ filter_by(id=instance_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupInstanceAssociation).\ filter_by(instance_id=instance_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.InstanceMetadata).\ filter_by(instance_id=instance_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.BlockDeviceMapping).\ filter_by(instance_id=instance_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) instance_info_cache_delete(context, instance_ref['uuid'], session=session) return instance_ref @require_context def instance_get_by_uuid(context, uuid, session=None): result = _build_instance_get(context, session=session).\ filter_by(uuid=uuid).\ first() if not result: raise exception.InstanceNotFound(instance_id=uuid) return result @require_context def instance_get(context, instance_id, session=None): result = _build_instance_get(context, session=session).\ filter_by(id=instance_id).\ first() if not result: raise exception.InstanceNotFound(instance_id=instance_id) return result @require_context def _build_instance_get(context, session=None): return model_query(context, models.Instance, session=session, project_only=True).\ options(joinedload_all('security_groups.rules')).\ options(joinedload('info_cache')).\ options(joinedload('volumes')).\ options(joinedload('metadata')).\ options(joinedload('instance_type')) @require_admin_context def instance_get_all(context): return model_query(context, models.Instance).\ options(joinedload('info_cache')).\ options(joinedload('security_groups')).\ options(joinedload('metadata')).\ options(joinedload('instance_type')).\ all() @require_context def instance_get_all_by_filters(context, filters, sort_key, sort_dir): """Return instances that match all filters. 
Deleted instances will be returned by default, unless there's a filter that says otherwise""" def _regexp_filter_by_metadata(instance, meta): inst_metadata = [{node['key']: node['value']} for node in instance['metadata']] if isinstance(meta, list): for node in meta: if node not in inst_metadata: return False elif isinstance(meta, dict): for k, v in meta.iteritems(): if {k: v} not in inst_metadata: return False return True def _regexp_filter_by_column(instance, filter_name, filter_re): try: v = getattr(instance, filter_name) except AttributeError: return True if v and filter_re.match(str(v)): return True return False sort_fn = {'desc': desc, 'asc': asc} session = get_session() query_prefix = session.query(models.Instance).\ options(joinedload('info_cache')).\ options(joinedload('security_groups')).\ options(joinedload('metadata')).\ options(joinedload('instance_type')).\ order_by(sort_fn[sort_dir](getattr(models.Instance, sort_key))) # Make a copy of the filters dictionary to use going forward, as we'll # be modifying it and we shouldn't affect the caller's use of it. filters = filters.copy() if 'changes-since' in filters: changes_since = utils.normalize_time(filters['changes-since']) query_prefix = query_prefix.\ filter(models.Instance.updated_at > changes_since) if 'deleted' in filters: # Instances can be soft or hard deleted and the query needs to # include or exclude both if filters.pop('deleted'): deleted = or_(models.Instance.deleted == True, models.Instance.vm_state == vm_states.SOFT_DELETE) query_prefix = query_prefix.filter(deleted) else: query_prefix = query_prefix.\ filter_by(deleted=False).\ filter(models.Instance.vm_state != vm_states.SOFT_DELETE) if not context.is_admin: # If we're not admin context, add appropriate filter.. if context.project_id: filters['project_id'] = context.project_id else: filters['user_id'] = context.user_id # Filters for exact matches that we can do along with the SQL query... # For other filters that don't match this, we will do regexp matching exact_match_filter_names = ['project_id', 'user_id', 'image_ref', 'vm_state', 'instance_type_id', 'uuid'] # Filter the query query_prefix = exact_filter(query_prefix, models.Instance, filters, exact_match_filter_names) instances = query_prefix.all() if not instances: return [] # Now filter on everything else for regexp matching.. # For filters not in the list, we'll attempt to use the filter_name # as a column name in Instance.. 
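    # NOTE(editor): illustrative sketch of what the loop below does for a
    # single non-exact filter such as {'display_name': 'test.*'} (the
    # 'display_name' key is only a hypothetical example here):
    #
    #     filter_re = re.compile('test.*')
    #     instances = [i for i in instances
    #                  if _regexp_filter_by_column(i, 'display_name',
    #                                              filter_re)]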
regexp_filter_funcs = {} for filter_name in filters.iterkeys(): filter_func = regexp_filter_funcs.get(filter_name, None) filter_re = re.compile(str(filters[filter_name])) if filter_func: filter_l = lambda instance: filter_func(instance, filter_re) elif filter_name == 'metadata': filter_l = lambda instance: _regexp_filter_by_metadata(instance, filters[filter_name]) else: filter_l = lambda instance: _regexp_filter_by_column(instance, filter_name, filter_re) instances = filter(filter_l, instances) if not instances: break return instances @require_context def instance_get_active_by_window(context, begin, end=None, project_id=None): """Return instances that were active during window.""" session = get_session() query = session.query(models.Instance) query = query.filter(or_(models.Instance.terminated_at == None, models.Instance.terminated_at > begin)) if end: query = query.filter(models.Instance.launched_at < end) if project_id: query = query.filter_by(project_id=project_id) return query.all() @require_admin_context def instance_get_active_by_window_joined(context, begin, end=None, project_id=None): """Return instances and joins that were active during window.""" session = get_session() query = session.query(models.Instance) query = query.options(joinedload('info_cache')).\ options(joinedload('security_groups')).\ options(joinedload('metadata')).\ options(joinedload('instance_type')).\ filter(or_(models.Instance.terminated_at == None, models.Instance.terminated_at > begin)) if end: query = query.filter(models.Instance.launched_at < end) if project_id: query = query.filter_by(project_id=project_id) return query.all() @require_admin_context def _instance_get_all_query(context, project_only=False): return model_query(context, models.Instance, project_only=project_only).\ options(joinedload('info_cache')).\ options(joinedload('security_groups')).\ options(joinedload('metadata')).\ options(joinedload('instance_type')) @require_admin_context def instance_get_all_by_host(context, host): return _instance_get_all_query(context).filter_by(host=host).all() @require_context def instance_get_all_by_project(context, project_id): authorize_project_context(context, project_id) return _instance_get_all_query(context).\ filter_by(project_id=project_id).\ all() @require_context def instance_get_all_by_reservation(context, reservation_id): return _instance_get_all_query(context, project_only=True).\ filter_by(reservation_id=reservation_id).\ all() # NOTE(jkoelker) This is only being left here for compat with floating # ips. Currently the network_api doesn't return floaters # in network_info. Once it starts return the model. 
This # function and its call in compute/manager.py on 1829 can # go away @require_context def instance_get_floating_address(context, instance_id): fixed_ips = fixed_ip_get_by_instance(context, instance_id) if not fixed_ips: return None # NOTE(tr3buchet): this only gets the first fixed_ip # won't find floating ips associated with other fixed_ips floating_ips = floating_ip_get_by_fixed_address(context, fixed_ips[0]['address']) if not floating_ips: return None # NOTE(vish): this just returns the first floating ip return floating_ips[0]['address'] @require_admin_context def instance_get_all_hung_in_rebooting(context, reboot_window, session=None): reboot_window = datetime.datetime.utcnow() - datetime.timedelta( seconds=reboot_window) if not session: session = get_session() results = session.query(models.Instance).\ filter(models.Instance.updated_at <= reboot_window).\ filter_by(task_state="rebooting").all() return results @require_context def instance_test_and_set(context, instance_id, attr, ok_states, new_state, session=None): """Atomically check if an instance is in a valid state, and if it is, set the instance into a new state. """ if not session: session = get_session() with session.begin(): query = model_query(context, models.Instance, session=session, project_only=True) if utils.is_uuid_like(instance_id): query = query.filter_by(uuid=instance_id) else: query = query.filter_by(id=instance_id) # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues instance = query.with_lockmode('update').first() state = instance[attr] if state not in ok_states: raise exception.InstanceInvalidState( attr=attr, instance_uuid=instance['uuid'], state=state, method='instance_test_and_set') instance[attr] = new_state instance.save(session=session) @require_context def instance_update(context, instance_id, values): session = get_session() if utils.is_uuid_like(instance_id): instance_ref = instance_get_by_uuid(context, instance_id, session=session) else: instance_ref = instance_get(context, instance_id, session=session) metadata = values.get('metadata') if metadata is not None: instance_metadata_update(context, instance_ref['id'], values.pop('metadata'), delete=True) with session.begin(): instance_ref.update(values) instance_ref.save(session=session) return instance_ref def instance_add_security_group(context, instance_uuid, security_group_id): """Associate the given security group with the given instance""" session = get_session() with session.begin(): instance_ref = instance_get_by_uuid(context, instance_uuid, session=session) security_group_ref = security_group_get(context, security_group_id, session=session) instance_ref.security_groups += [security_group_ref] instance_ref.save(session=session) @require_context def instance_remove_security_group(context, instance_uuid, security_group_id): """Disassociate the given security group from the given instance""" session = get_session() instance_ref = instance_get_by_uuid(context, instance_uuid, session=session) session.query(models.SecurityGroupInstanceAssociation).\ filter_by(instance_id=instance_ref['id']).\ filter_by(security_group_id=security_group_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def instance_action_create(context, values): """Create an instance action from the values dictionary.""" action_ref = models.InstanceActions() action_ref.update(values) session = get_session() with session.begin(): action_ref.save(session=session) return 
action_ref @require_admin_context def instance_get_actions(context, instance_uuid): """Return the actions associated to the given instance id""" session = get_session() return session.query(models.InstanceActions).\ filter_by(instance_uuid=instance_uuid).\ all() @require_context def instance_get_id_to_uuid_mapping(context, ids): session = get_session() instances = session.query(models.Instance).\ filter(models.Instance.id.in_(ids)).\ all() mapping = {} for instance in instances: mapping[instance['id']] = instance['uuid'] return mapping ################### @require_context def instance_info_cache_create(context, values): """Create a new instance cache record in the table. :param context: = request context object :param values: = dict containing column values """ info_cache = models.InstanceInfoCache() info_cache.update(values) session = get_session() with session.begin(): info_cache.save(session=session) return info_cache @require_context def instance_info_cache_get(context, instance_uuid, session=None): """Gets an instance info cache from the table. :param instance_uuid: = uuid of the info cache's instance :param session: = optional session object """ session = session or get_session() info_cache = session.query(models.InstanceInfoCache).\ filter_by(instance_id=instance_uuid).\ first() return info_cache @require_context def instance_info_cache_update(context, instance_uuid, values, session=None): """Update an instance info cache record in the table. :param instance_uuid: = uuid of info cache's instance :param values: = dict containing column values to update :param session: = optional session object """ session = session or get_session() info_cache = instance_info_cache_get(context, instance_uuid, session=session) if info_cache: info_cache.update(values) info_cache.save(session=session) else: # NOTE(tr3buchet): just in case someone blows away an instance's # cache entry values['instance_id'] = instance_uuid info_cache = instance_info_cache_create(context, values) return info_cache @require_context def instance_info_cache_delete(context, instance_uuid, session=None): """Deletes an existing instance_info_cache record :param instance_uuid: = uuid of the instance tied to the cache record :param session: = optional session object """ values = {'deleted': True, 'deleted_at': utils.utcnow()} instance_info_cache_update(context, instance_uuid, values, session) ################### @require_context def key_pair_create(context, values): key_pair_ref = models.KeyPair() key_pair_ref.update(values) key_pair_ref.save() return key_pair_ref @require_context def key_pair_destroy(context, user_id, name): authorize_user_context(context, user_id) session = get_session() with session.begin(): key_pair_ref = key_pair_get(context, user_id, name, session=session) key_pair_ref.delete(session=session) @require_context def key_pair_destroy_all_by_user(context, user_id): authorize_user_context(context, user_id) session = get_session() with session.begin(): session.query(models.KeyPair).\ filter_by(user_id=user_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def key_pair_get(context, user_id, name, session=None): authorize_user_context(context, user_id) result = model_query(context, models.KeyPair, session=session).\ filter_by(user_id=user_id).\ filter_by(name=name).\ first() if not result: raise exception.KeypairNotFound(user_id=user_id, name=name) return result @require_context def key_pair_get_all_by_user(context, user_id): 
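    # NOTE(editor): illustrative usage (editor's sketch; assumes a request
    # context `ctxt` authorized for the same user):
    #
    #     for kp in key_pair_get_all_by_user(ctxt, 'user1'):
    #         print kp['name'], kp['fingerprint']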
authorize_user_context(context, user_id) return model_query(context, models.KeyPair, read_deleted="no").\ filter_by(user_id=user_id).\ all() ################### @require_admin_context def network_associate(context, project_id, force=False): """Associate a project with a network. called by project_get_networks under certain conditions and network manager add_network_to_project() only associate if the project doesn't already have a network or if force is True force solves race condition where a fresh project has multiple instance builds simultaneously picked up by multiple network hosts which attempt to associate the project with multiple networks force should only be used as a direct consequence of user request all automated requests should not use force """ session = get_session() with session.begin(): def network_query(project_filter): return model_query(context, models.Network, session=session, read_deleted="no").\ filter_by(project_id=project_filter).\ with_lockmode('update').\ first() if not force: # find out if project has a network network_ref = network_query(project_id) if force or not network_ref: # in force mode or project doesn't have a network so associate # with a new network # get new network network_ref = network_query(None) if not network_ref: raise db.NoMoreNetworks() # associate with network # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues network_ref['project_id'] = project_id session.add(network_ref) return network_ref @require_admin_context def network_count(context): return model_query(context, models.Network).count() @require_admin_context def _network_ips_query(context, network_id): return model_query(context, models.FixedIp, read_deleted="no").\ filter_by(network_id=network_id) @require_admin_context def network_count_reserved_ips(context, network_id): return _network_ips_query(context, network_id).\ filter_by(reserved=True).\ count() @require_admin_context def network_create_safe(context, values): if values.get('vlan'): if model_query(context, models.Network, read_deleted="no")\ .filter_by(vlan=values['vlan'])\ .first(): raise exception.DuplicateVlan(vlan=values['vlan']) network_ref = models.Network() network_ref['uuid'] = str(utils.gen_uuid()) network_ref.update(values) try: network_ref.save() return network_ref except IntegrityError: return None @require_admin_context def network_delete_safe(context, network_id): session = get_session() with session.begin(): result = session.query(models.FixedIp).\ filter_by(network_id=network_id).\ filter_by(deleted=False).\ filter_by(allocated=True).\ all() if result: raise exception.NetworkInUse(network_id=network_id) network_ref = network_get(context, network_id=network_id, session=session) session.query(models.FixedIp).\ filter_by(network_id=network_id).\ filter_by(deleted=False).\ update({'deleted': True, 'updated_at': literal_column('updated_at'), 'deleted_at': utils.utcnow()}) session.delete(network_ref) @require_admin_context def network_disassociate(context, network_id): network_update(context, network_id, {'project_id': None, 'host': None}) @require_context def network_get(context, network_id, session=None): result = model_query(context, models.Network, session=session, project_only=True).\ filter_by(id=network_id).\ first() if not result: raise exception.NetworkNotFound(network_id=network_id) return result @require_admin_context def network_get_all(context): result = model_query(context, models.Network, read_deleted="no").all() if not result: raise 
exception.NoNetworksFound()
    return result


@require_admin_context
def network_get_all_by_uuids(context, network_uuids, project_id=None):
    project_or_none = or_(models.Network.project_id == project_id,
                          models.Network.project_id == None)
    result = model_query(context, models.Network, read_deleted="no").\
                 filter(models.Network.uuid.in_(network_uuids)).\
                 filter(project_or_none).\
                 all()
    if not result:
        raise exception.NoNetworksFound()

    # check that a host is set on every network returned in the result
    for network in result:
        if network['host'] is None:
            raise exception.NetworkHostNotSet(network_id=network['id'])

    # check that the result contains all the networks
    # we are looking for
    for network_uuid in network_uuids:
        found = False
        for network in result:
            if network['uuid'] == network_uuid:
                found = True
                break
        if not found:
            if project_id:
                raise exception.NetworkNotFoundForProject(
                        network_uuid=network_uuid, project_id=project_id)
            raise exception.NetworkNotFound(network_id=network_uuid)

    return result


# NOTE(vish): pylint complains because of the long method name, but
#             it fits with the names of the rest of the methods
# pylint: disable=C0103


@require_admin_context
def network_get_associated_fixed_ips(context, network_id, host=None):
    # FIXME(sirp): since this returns fixed_ips, this would be better named
    # fixed_ip_get_all_by_network.
    # NOTE(vish): The ugly joins here are to solve a performance issue and
    #             should be removed once we can add and remove leases
    #             without regenerating the whole list
    vif_and = and_(models.VirtualInterface.id ==
                   models.FixedIp.virtual_interface_id,
                   models.VirtualInterface.deleted == False)
    inst_and = and_(models.Instance.id == models.FixedIp.instance_id,
                    models.Instance.deleted == False)
    session = get_session()
    query = session.query(models.FixedIp.address,
                          models.FixedIp.instance_id,
                          models.FixedIp.network_id,
                          models.FixedIp.virtual_interface_id,
                          models.VirtualInterface.address,
                          models.Instance.hostname,
                          models.Instance.updated_at,
                          models.Instance.created_at).\
                  filter(models.FixedIp.deleted == False).\
                  filter(models.FixedIp.network_id == network_id).\
                  filter(models.FixedIp.allocated == True).\
                  join((models.VirtualInterface, vif_and)).\
                  join((models.Instance, inst_and)).\
                  filter(models.FixedIp.instance_id != None).\
                  filter(models.FixedIp.virtual_interface_id != None)
    if host:
        query = query.filter(models.Instance.host == host)
    result = query.all()
    data = []
    for datum in result:
        cleaned = {}
        cleaned['address'] = datum[0]
        cleaned['instance_id'] = datum[1]
        cleaned['network_id'] = datum[2]
        cleaned['vif_id'] = datum[3]
        cleaned['vif_address'] = datum[4]
        cleaned['instance_hostname'] = datum[5]
        cleaned['instance_updated'] = datum[6]
        cleaned['instance_created'] = datum[7]
        data.append(cleaned)
    return data


@require_admin_context
def _network_get_query(context, session=None):
    return model_query(context, models.Network, session=session,
                       read_deleted="no")


@require_admin_context
def network_get_by_bridge(context, bridge):
    result = _network_get_query(context).filter_by(bridge=bridge).first()

    if not result:
        raise exception.NetworkNotFoundForBridge(bridge=bridge)

    return result


@require_admin_context
def network_get_by_uuid(context, uuid):
    result = _network_get_query(context).filter_by(uuid=uuid).first()

    if not result:
        raise exception.NetworkNotFoundForUUID(uuid=uuid)

    return result


@require_admin_context
def network_get_by_cidr(context, cidr):
    result = _network_get_query(context).\
                filter(or_(models.Network.cidr == cidr,
                           models.Network.cidr_v6 == cidr)).\
                first()

    if not result:
        raise
exception.NetworkNotFoundForCidr(cidr=cidr) return result @require_admin_context def network_get_by_instance(context, instance_id): # note this uses fixed IP to get to instance # only works for networks the instance has an IP from result = _network_get_query(context).\ filter_by(instance_id=instance_id).\ first() if not result: raise exception.NetworkNotFoundForInstance(instance_id=instance_id) return result @require_admin_context def network_get_all_by_instance(context, instance_id): result = _network_get_query(context).\ filter_by(instance_id=instance_id).\ all() if not result: raise exception.NetworkNotFoundForInstance(instance_id=instance_id) return result @require_admin_context def network_get_all_by_host(context, host): session = get_session() fixed_ip_query = model_query(context, models.FixedIp.network_id, session=session).\ filter(models.FixedIp.host == host) # NOTE(vish): return networks that have host set # or that have a fixed ip with host set host_filter = or_(models.Network.host == host, models.Network.id.in_(fixed_ip_query.subquery())) return _network_get_query(context, session=session).\ filter(host_filter).\ all() @require_admin_context def network_set_host(context, network_id, host_id): session = get_session() with session.begin(): network_ref = _network_get_query(context, session=session).\ filter_by(id=network_id).\ with_lockmode('update').\ first() if not network_ref: raise exception.NetworkNotFound(network_id=network_id) # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues if not network_ref['host']: network_ref['host'] = host_id session.add(network_ref) return network_ref['host'] @require_context def network_update(context, network_id, values): session = get_session() with session.begin(): network_ref = network_get(context, network_id, session=session) network_ref.update(values) network_ref.save(session=session) return network_ref ################### def queue_get_for(context, topic, physical_node_id): # FIXME(ja): this should be servername? 
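    # NOTE(editor): illustrative result (editor's sketch):
    #
    #     queue_get_for(ctxt, 'compute', 'host1')  # => 'compute.host1'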
return "%s.%s" % (topic, physical_node_id) ################### @require_admin_context def iscsi_target_count_by_host(context, host): return model_query(context, models.IscsiTarget).\ filter_by(host=host).\ count() @require_admin_context def iscsi_target_create_safe(context, values): iscsi_target_ref = models.IscsiTarget() for (key, value) in values.iteritems(): iscsi_target_ref[key] = value try: iscsi_target_ref.save() return iscsi_target_ref except IntegrityError: return None ################### @require_admin_context def auth_token_destroy(context, token_id): session = get_session() with session.begin(): token_ref = auth_token_get(context, token_id, session=session) token_ref.delete(session=session) @require_admin_context def auth_token_get(context, token_hash, session=None): result = model_query(context, models.AuthToken, session=session).\ filter_by(token_hash=token_hash).\ first() if not result: raise exception.AuthTokenNotFound(token=token_hash) return result @require_admin_context def auth_token_update(context, token_hash, values): session = get_session() with session.begin(): token_ref = auth_token_get(context, token_hash, session=session) token_ref.update(values) token_ref.save(session=session) @require_admin_context def auth_token_create(context, token): tk = models.AuthToken() tk.update(token) tk.save() return tk ################### @require_context def quota_get(context, project_id, resource, session=None): result = model_query(context, models.Quota, session=session, read_deleted="no").\ filter_by(project_id=project_id).\ filter_by(resource=resource).\ first() if not result: raise exception.ProjectQuotaNotFound(project_id=project_id) return result @require_context def quota_get_all_by_project(context, project_id): authorize_project_context(context, project_id) rows = model_query(context, models.Quota, read_deleted="no").\ filter_by(project_id=project_id).\ all() result = {'project_id': project_id} for row in rows: result[row.resource] = row.hard_limit return result @require_admin_context def quota_create(context, project_id, resource, limit): quota_ref = models.Quota() quota_ref.project_id = project_id quota_ref.resource = resource quota_ref.hard_limit = limit quota_ref.save() return quota_ref @require_admin_context def quota_update(context, project_id, resource, limit): session = get_session() with session.begin(): quota_ref = quota_get(context, project_id, resource, session=session) quota_ref.hard_limit = limit quota_ref.save(session=session) @require_admin_context def quota_destroy(context, project_id, resource): session = get_session() with session.begin(): quota_ref = quota_get(context, project_id, resource, session=session) quota_ref.delete(session=session) @require_admin_context def quota_destroy_all_by_project(context, project_id): session = get_session() with session.begin(): quotas = model_query(context, models.Quota, session=session, read_deleted="no").\ filter_by(project_id=project_id).\ all() for quota_ref in quotas: quota_ref.delete(session=session) ################### @require_context def quota_class_get(context, class_name, resource, session=None): result = model_query(context, models.QuotaClass, session=session, read_deleted="no").\ filter_by(class_name=class_name).\ filter_by(resource=resource).\ first() if not result: raise exception.QuotaClassNotFound(class_name=class_name) return result @require_context def quota_class_get_all_by_name(context, class_name): authorize_quota_class_context(context, class_name) rows = model_query(context, models.QuotaClass, 
read_deleted="no").\ filter_by(class_name=class_name).\ all() result = {'class_name': class_name} for row in rows: result[row.resource] = row.hard_limit return result @require_admin_context def quota_class_create(context, class_name, resource, limit): quota_class_ref = models.QuotaClass() quota_class_ref.class_name = class_name quota_class_ref.resource = resource quota_class_ref.hard_limit = limit quota_class_ref.save() return quota_class_ref @require_admin_context def quota_class_update(context, class_name, resource, limit): session = get_session() with session.begin(): quota_class_ref = quota_class_get(context, class_name, resource, session=session) quota_class_ref.hard_limit = limit quota_class_ref.save(session=session) @require_admin_context def quota_class_destroy(context, class_name, resource): session = get_session() with session.begin(): quota_class_ref = quota_class_get(context, class_name, resource, session=session) quota_class_ref.delete(session=session) @require_admin_context def quota_class_destroy_all_by_name(context, class_name): session = get_session() with session.begin(): quota_classes = model_query(context, models.QuotaClass, session=session, read_deleted="no").\ filter_by(class_name=class_name).\ all() for quota_class_ref in quota_classes: quota_class_ref.delete(session=session) ################### @require_admin_context def volume_allocate_iscsi_target(context, volume_id, host): session = get_session() with session.begin(): iscsi_target_ref = model_query(context, models.IscsiTarget, session=session, read_deleted="no").\ filter_by(volume=None).\ filter_by(host=host).\ with_lockmode('update').\ first() # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues if not iscsi_target_ref: raise db.NoMoreTargets() iscsi_target_ref.volume_id = volume_id session.add(iscsi_target_ref) return iscsi_target_ref.target_num @require_admin_context def volume_attached(context, volume_id, instance_id, mountpoint): session = get_session() with session.begin(): volume_ref = volume_get(context, volume_id, session=session) volume_ref['status'] = 'in-use' volume_ref['mountpoint'] = mountpoint volume_ref['attach_status'] = 'attached' volume_ref.instance = instance_get(context, instance_id, session=session) volume_ref.save(session=session) @require_context def volume_create(context, values): values['volume_metadata'] = _metadata_refs(values.get('metadata'), models.VolumeMetadata) volume_ref = models.Volume() volume_ref.update(values) session = get_session() with session.begin(): volume_ref.save(session=session) return volume_ref @require_admin_context def volume_data_get_for_project(context, project_id): result = model_query(context, func.count(models.Volume.id), func.sum(models.Volume.size), read_deleted="no").\ filter_by(project_id=project_id).\ first() # NOTE(vish): convert None to 0 return (result[0] or 0, result[1] or 0) @require_admin_context def volume_destroy(context, volume_id): session = get_session() with session.begin(): session.query(models.Volume).\ filter_by(id=volume_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.IscsiTarget).\ filter_by(volume_id=volume_id).\ update({'volume_id': None}) session.query(models.VolumeMetadata).\ filter_by(volume_id=volume_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_admin_context def volume_detached(context, volume_id): session = get_session() with 
session.begin(): volume_ref = volume_get(context, volume_id, session=session) volume_ref['status'] = 'available' volume_ref['mountpoint'] = None volume_ref['attach_status'] = 'detached' volume_ref.instance = None volume_ref.save(session=session) @require_context def _volume_get_query(context, session=None, project_only=False): return model_query(context, models.Volume, session=session, project_only=project_only).\ options(joinedload('instance')).\ options(joinedload('volume_metadata')).\ options(joinedload('volume_type')) @require_context def volume_get(context, volume_id, session=None): result = _volume_get_query(context, session=session, project_only=True).\ filter_by(id=volume_id).\ first() if not result: raise exception.VolumeNotFound(volume_id=volume_id) return result @require_admin_context def volume_get_all(context): return _volume_get_query(context).all() @require_admin_context def volume_get_all_by_host(context, host): return _volume_get_query(context).filter_by(host=host).all() @require_admin_context def volume_get_all_by_instance(context, instance_id): result = model_query(context, models.Volume, read_deleted="no").\ options(joinedload('volume_metadata')).\ options(joinedload('volume_type')).\ filter_by(instance_id=instance_id).\ all() if not result: raise exception.VolumeNotFoundForInstance(instance_id=instance_id) return result @require_context def volume_get_all_by_project(context, project_id): authorize_project_context(context, project_id) return _volume_get_query(context).filter_by(project_id=project_id).all() @require_admin_context def volume_get_instance(context, volume_id): result = _volume_get_query(context).filter_by(id=volume_id).first() if not result: raise exception.VolumeNotFound(volume_id=volume_id) return result.instance @require_admin_context def volume_get_iscsi_target_num(context, volume_id): result = model_query(context, models.IscsiTarget, read_deleted="yes").\ filter_by(volume_id=volume_id).\ first() if not result: raise exception.ISCSITargetNotFoundForVolume(volume_id=volume_id) return result.target_num @require_context def volume_update(context, volume_id, values): session = get_session() metadata = values.get('metadata') if metadata is not None: volume_metadata_update(context, volume_id, values.pop('metadata'), delete=True) with session.begin(): volume_ref = volume_get(context, volume_id, session=session) volume_ref.update(values) volume_ref.save(session=session) #################### def _volume_metadata_get_query(context, volume_id, session=None): return model_query(context, models.VolumeMetadata, session=session, read_deleted="no").\ filter_by(volume_id=volume_id) @require_context @require_volume_exists def volume_metadata_get(context, volume_id): rows = _volume_metadata_get_query(context, volume_id).all() result = {} for row in rows: result[row['key']] = row['value'] return result @require_context @require_volume_exists def volume_metadata_delete(context, volume_id, key): _volume_metadata_get_query(context, volume_id).\ filter_by(key=key).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context @require_volume_exists def volume_metadata_get_item(context, volume_id, key, session=None): result = _volume_metadata_get_query(context, volume_id, session=session).\ filter_by(key=key).\ first() if not result: raise exception.VolumeMetadataNotFound(metadata_key=key, volume_id=volume_id) return result @require_context @require_volume_exists def volume_metadata_update(context, volume_id, metadata, 
delete): session = get_session() # Set existing metadata to deleted if delete argument is True if delete: original_metadata = volume_metadata_get(context, volume_id) for meta_key, meta_value in original_metadata.iteritems(): if meta_key not in metadata: meta_ref = volume_metadata_get_item(context, volume_id, meta_key, session) meta_ref.update({'deleted': True}) meta_ref.save(session=session) meta_ref = None # Now update all existing items with new values, or create new meta objects for meta_key, meta_value in metadata.iteritems(): # update the value whether it exists or not item = {"value": meta_value} try: meta_ref = volume_metadata_get_item(context, volume_id, meta_key, session) except exception.VolumeMetadataNotFound, e: meta_ref = models.VolumeMetadata() item.update({"key": meta_key, "volume_id": volume_id}) meta_ref.update(item) meta_ref.save(session=session) return metadata ################### @require_context def snapshot_create(context, values): snapshot_ref = models.Snapshot() snapshot_ref.update(values) session = get_session() with session.begin(): snapshot_ref.save(session=session) return snapshot_ref @require_admin_context def snapshot_destroy(context, snapshot_id): session = get_session() with session.begin(): session.query(models.Snapshot).\ filter_by(id=snapshot_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def snapshot_get(context, snapshot_id, session=None): result = model_query(context, models.Snapshot, session=session, project_only=True).\ filter_by(id=snapshot_id).\ first() if not result: raise exception.SnapshotNotFound(snapshot_id=snapshot_id) return result @require_admin_context def snapshot_get_all(context): return model_query(context, models.Snapshot).all() @require_context def snapshot_get_all_for_volume(context, volume_id): return model_query(context, models.Snapshot, read_deleted='no', project_only=True).\ filter_by(volume_id=volume_id).all() @require_context def snapshot_get_all_by_project(context, project_id): authorize_project_context(context, project_id) return model_query(context, models.Snapshot).\ filter_by(project_id=project_id).\ all() @require_context def snapshot_update(context, snapshot_id, values): session = get_session() with session.begin(): snapshot_ref = snapshot_get(context, snapshot_id, session=session) snapshot_ref.update(values) snapshot_ref.save(session=session) ################### def _block_device_mapping_get_query(context, session=None): return model_query(context, models.BlockDeviceMapping, session=session, read_deleted="no") @require_context def block_device_mapping_create(context, values): bdm_ref = models.BlockDeviceMapping() bdm_ref.update(values) session = get_session() with session.begin(): bdm_ref.save(session=session) @require_context def block_device_mapping_update(context, bdm_id, values): session = get_session() with session.begin(): _block_device_mapping_get_query(context, session=session).\ filter_by(id=bdm_id).\ update(values) @require_context def block_device_mapping_update_or_create(context, values): session = get_session() with session.begin(): result = _block_device_mapping_get_query(context, session=session).\ filter_by(instance_id=values['instance_id']).\ filter_by(device_name=values['device_name']).\ first() if not result: bdm_ref = models.BlockDeviceMapping() bdm_ref.update(values) bdm_ref.save(session=session) else: result.update(values) # NOTE(yamahata): same virtual device name can be specified multiple # times. 
So delete the existing ones. virtual_name = values['virtual_name'] if (virtual_name is not None and block_device.is_swap_or_ephemeral(virtual_name)): session.query(models.BlockDeviceMapping).\ filter_by(instance_id=values['instance_id']).\ filter_by(virtual_name=virtual_name).\ filter(models.BlockDeviceMapping.device_name != values['device_name']).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def block_device_mapping_get_all_by_instance(context, instance_id): return _block_device_mapping_get_query(context).\ filter_by(instance_id=instance_id).\ all() @require_context def block_device_mapping_destroy(context, bdm_id): session = get_session() with session.begin(): session.query(models.BlockDeviceMapping).\ filter_by(id=bdm_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def block_device_mapping_destroy_by_instance_and_volume(context, instance_id, volume_id): session = get_session() with session.begin(): _block_device_mapping_get_query(context, session=session).\ filter_by(instance_id=instance_id).\ filter_by(volume_id=volume_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) ################### def _security_group_get_query(context, session=None, read_deleted=None, project_only=False): return model_query(context, models.SecurityGroup, session=session, read_deleted=read_deleted, project_only=project_only).\ options(joinedload_all('rules')) @require_context def security_group_get_all(context): return _security_group_get_query(context).all() @require_context def security_group_get(context, security_group_id, session=None): result = _security_group_get_query(context, session=session, project_only=True).\ filter_by(id=security_group_id).\ options(joinedload_all('instances')).\ first() if not result: raise exception.SecurityGroupNotFound( security_group_id=security_group_id) return result @require_context def security_group_get_by_name(context, project_id, group_name): result = _security_group_get_query(context, read_deleted="no").\ filter_by(project_id=project_id).\ filter_by(name=group_name).\ options(joinedload_all('instances')).\ first() if not result: raise exception.SecurityGroupNotFoundForProject( project_id=project_id, security_group_id=group_name) return result @require_context def security_group_get_by_project(context, project_id): return _security_group_get_query(context, read_deleted="no").\ filter_by(project_id=project_id).\ all() @require_context def security_group_get_by_instance(context, instance_id): return _security_group_get_query(context, read_deleted="no").\ join(models.SecurityGroup.instances).\ filter_by(id=instance_id).\ all() @require_context def security_group_exists(context, project_id, group_name): try: group = security_group_get_by_name(context, project_id, group_name) return group is not None except exception.NotFound: return False @require_context def security_group_in_use(context, group_id): session = get_session() with session.begin(): # Are there any instances that haven't been deleted # that include this group? 
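        # NOTE(editor): this is a two-step existence check: first fetch
        # the not-deleted associations for this group, then count
        # not-deleted instances for each association; the group is
        # considered "in use" as soon as one such instance exists.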
inst_assoc = session.query(models.SecurityGroupInstanceAssociation).\ filter_by(security_group_id=group_id).\ filter_by(deleted=False).\ all() for ia in inst_assoc: num_instances = session.query(models.Instance).\ filter_by(deleted=False).\ filter_by(id=ia.instance_id).\ count() if num_instances: return True return False @require_context def security_group_create(context, values): security_group_ref = models.SecurityGroup() # FIXME(devcamcar): Unless I do this, rules fails with lazy load exception # once save() is called. This will get cleaned up in next orm pass. security_group_ref.rules security_group_ref.update(values) security_group_ref.save() return security_group_ref @require_context def security_group_destroy(context, security_group_id): session = get_session() with session.begin(): session.query(models.SecurityGroup).\ filter_by(id=security_group_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupInstanceAssociation).\ filter_by(security_group_id=security_group_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupIngressRule).\ filter_by(group_id=security_group_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) ################### def _security_group_rule_get_query(context, session=None): return model_query(context, models.SecurityGroupIngressRule, session=session) @require_context def security_group_rule_get(context, security_group_rule_id, session=None): result = _security_group_rule_get_query(context, session=session).\ filter_by(id=security_group_rule_id).\ first() if not result: raise exception.SecurityGroupNotFoundForRule( rule_id=security_group_rule_id) return result @require_context def security_group_rule_get_by_security_group(context, security_group_id, session=None): return _security_group_rule_get_query(context, session=session).\ filter_by(parent_group_id=security_group_id).\ options(joinedload_all('grantee_group.instances')).\ all() @require_context def security_group_rule_get_by_security_group_grantee(context, security_group_id, session=None): return _security_group_rule_get_query(context, session=session).\ filter_by(group_id=security_group_id).\ all() @require_context def security_group_rule_create(context, values): security_group_rule_ref = models.SecurityGroupIngressRule() security_group_rule_ref.update(values) security_group_rule_ref.save() return security_group_rule_ref @require_context def security_group_rule_destroy(context, security_group_rule_id): session = get_session() with session.begin(): security_group_rule = security_group_rule_get(context, security_group_rule_id, session=session) security_group_rule.delete(session=session) ################### @require_admin_context def provider_fw_rule_create(context, rule): fw_rule_ref = models.ProviderFirewallRule() fw_rule_ref.update(rule) fw_rule_ref.save() return fw_rule_ref @require_admin_context def provider_fw_rule_get_all(context): return model_query(context, models.ProviderFirewallRule).all() @require_admin_context def provider_fw_rule_destroy(context, rule_id): session = get_session() with session.begin(): session.query(models.ProviderFirewallRule).\ filter_by(id=rule_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) ################### @require_admin_context def user_get(context, id, session=None): result = 
model_query(context, models.User, session=session).\ filter_by(id=id).\ first() if not result: raise exception.UserNotFound(user_id=id) return result @require_admin_context def user_get_by_access_key(context, access_key, session=None): result = model_query(context, models.User, session=session).\ filter_by(access_key=access_key).\ first() if not result: raise exception.AccessKeyNotFound(access_key=access_key) return result @require_admin_context def user_create(context, values): user_ref = models.User() user_ref.update(values) user_ref.save() return user_ref @require_admin_context def user_delete(context, id): session = get_session() with session.begin(): session.query(models.UserProjectAssociation).\ filter_by(user_id=id).\ delete() session.query(models.UserRoleAssociation).\ filter_by(user_id=id).\ delete() session.query(models.UserProjectRoleAssociation).\ filter_by(user_id=id).\ delete() user_ref = user_get(context, id, session=session) session.delete(user_ref) def user_get_all(context): return model_query(context, models.User).all() def user_get_roles(context, user_id): session = get_session() with session.begin(): user_ref = user_get(context, user_id, session=session) return [role.role for role in user_ref['roles']] def user_get_roles_for_project(context, user_id, project_id): session = get_session() with session.begin(): res = session.query(models.UserProjectRoleAssociation).\ filter_by(user_id=user_id).\ filter_by(project_id=project_id).\ all() return [association.role for association in res] def user_remove_project_role(context, user_id, project_id, role): session = get_session() with session.begin(): session.query(models.UserProjectRoleAssociation).\ filter_by(user_id=user_id).\ filter_by(project_id=project_id).\ filter_by(role=role).\ delete() def user_remove_role(context, user_id, role): session = get_session() with session.begin(): res = session.query(models.UserRoleAssociation).\ filter_by(user_id=user_id).\ filter_by(role=role).\ all() for role in res: session.delete(role) def user_add_role(context, user_id, role): session = get_session() with session.begin(): user_ref = user_get(context, user_id, session=session) models.UserRoleAssociation(user=user_ref, role=role).\ save(session=session) def user_add_project_role(context, user_id, project_id, role): session = get_session() with session.begin(): user_ref = user_get(context, user_id, session=session) project_ref = project_get(context, project_id, session=session) models.UserProjectRoleAssociation(user_id=user_ref['id'], project_id=project_ref['id'], role=role).save(session=session) def user_update(context, user_id, values): session = get_session() with session.begin(): user_ref = user_get(context, user_id, session=session) user_ref.update(values) user_ref.save(session=session) ################### def project_create(context, values): project_ref = models.Project() project_ref.update(values) project_ref.save() return project_ref def project_add_member(context, project_id, user_id): session = get_session() with session.begin(): project_ref = project_get(context, project_id, session=session) user_ref = user_get(context, user_id, session=session) project_ref.members += [user_ref] project_ref.save(session=session) def project_get(context, id, session=None): result = model_query(context, models.Project, session=session, read_deleted="no").\ filter_by(id=id).\ options(joinedload_all('members')).\ first() if not result: raise exception.ProjectNotFound(project_id=id) return result def project_get_all(context): return 
model_query(context, models.Project).\ options(joinedload_all('members')).\ all() def project_get_by_user(context, user_id): user = model_query(context, models.User).\ filter_by(id=user_id).\ options(joinedload_all('projects')).\ first() if not user: raise exception.UserNotFound(user_id=user_id) return user.projects def project_remove_member(context, project_id, user_id): session = get_session() project = project_get(context, project_id, session=session) user = user_get(context, user_id, session=session) if user in project.members: project.members.remove(user) project.save(session=session) def project_update(context, project_id, values): session = get_session() with session.begin(): project_ref = project_get(context, project_id, session=session) project_ref.update(values) project_ref.save(session=session) def project_delete(context, id): session = get_session() with session.begin(): session.query(models.UserProjectAssociation).\ filter_by(project_id=id).\ delete() session.query(models.UserProjectRoleAssociation).\ filter_by(project_id=id).\ delete() project_ref = project_get(context, id, session=session) session.delete(project_ref) @require_context def project_get_networks(context, project_id, associate=True): # NOTE(tr3buchet): as before this function will associate # a project with a network if it doesn't have one and # associate is true result = model_query(context, models.Network, read_deleted="no").\ filter_by(project_id=project_id).\ all() if not result: if not associate: return [] return [network_associate(context, project_id)] return result ################### @require_admin_context def migration_create(context, values): migration = models.Migration() migration.update(values) migration.save() return migration @require_admin_context def migration_update(context, id, values): session = get_session() with session.begin(): migration = migration_get(context, id, session=session) migration.update(values) migration.save(session=session) return migration @require_admin_context def migration_get(context, id, session=None): result = model_query(context, models.Migration, session=session, read_deleted="yes").\ filter_by(id=id).\ first() if not result: raise exception.MigrationNotFound(migration_id=id) return result @require_admin_context def migration_get_by_instance_and_status(context, instance_uuid, status): result = model_query(context, models.Migration, read_deleted="yes").\ filter_by(instance_uuid=instance_uuid).\ filter_by(status=status).\ first() if not result: raise exception.MigrationNotFoundByStatus(instance_id=instance_uuid, status=status) return result @require_admin_context def migration_get_all_unconfirmed(context, confirm_window, session=None): confirm_window = datetime.datetime.utcnow() - datetime.timedelta( seconds=confirm_window) return model_query(context, models.Migration, session=session, read_deleted="yes").\ filter(models.Migration.updated_at <= confirm_window).\ filter_by(status="finished").\ all() ################## def console_pool_create(context, values): pool = models.ConsolePool() pool.update(values) pool.save() return pool def console_pool_get(context, pool_id): result = model_query(context, models.ConsolePool, read_deleted="no").\ filter_by(id=pool_id).\ first() if not result: raise exception.ConsolePoolNotFound(pool_id=pool_id) return result def console_pool_get_by_host_type(context, compute_host, host, console_type): result = model_query(context, models.ConsolePool, read_deleted="no").\ filter_by(host=host).\ filter_by(console_type=console_type).\ 
filter_by(compute_host=compute_host).\ options(joinedload('consoles')).\ first() if not result: raise exception.ConsolePoolNotFoundForHostType( host=host, console_type=console_type, compute_host=compute_host) return result def console_pool_get_all_by_host_type(context, host, console_type): return model_query(context, models.ConsolePool, read_deleted="no").\ filter_by(host=host).\ filter_by(console_type=console_type).\ options(joinedload('consoles')).\ all() def console_create(context, values): console = models.Console() console.update(values) console.save() return console def console_delete(context, console_id): session = get_session() with session.begin(): # NOTE(mdragon): consoles are meant to be transient. session.query(models.Console).\ filter_by(id=console_id).\ delete() def console_get_by_pool_instance(context, pool_id, instance_id): result = model_query(context, models.Console, read_deleted="yes").\ filter_by(pool_id=pool_id).\ filter_by(instance_id=instance_id).\ options(joinedload('pool')).\ first() if not result: raise exception.ConsoleNotFoundInPoolForInstance( pool_id=pool_id, instance_id=instance_id) return result def console_get_all_by_instance(context, instance_id): return model_query(context, models.Console, read_deleted="yes").\ filter_by(instance_id=instance_id).\ all() def console_get(context, console_id, instance_id=None): query = model_query(context, models.Console, read_deleted="yes").\ filter_by(id=console_id).\ options(joinedload('pool')) if instance_id is not None: query = query.filter_by(instance_id=instance_id) result = query.first() if not result: if instance_id: raise exception.ConsoleNotFoundForInstance( console_id=console_id, instance_id=instance_id) else: raise exception.ConsoleNotFound(console_id=console_id) return result ################## @require_admin_context def instance_type_create(context, values): """Create a new instance type. In order to pass in extra specs, the values dict should contain a 'extra_specs' key/value pair: {'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}} """ session = get_session() with session.begin(): try: instance_type_get_by_name(context, values['name'], session) raise exception.InstanceTypeExists(name=values['name']) except exception.InstanceTypeNotFoundByName: pass try: instance_type_get_by_flavor_id(context, values['flavorid'], session) raise exception.InstanceTypeExists(name=values['name']) except exception.FlavorNotFound: pass try: specs = values.get('extra_specs') specs_refs = [] if specs: for k, v in specs.iteritems(): specs_ref = models.InstanceTypeExtraSpecs() specs_ref['key'] = k specs_ref['value'] = v specs_refs.append(specs_ref) values['extra_specs'] = specs_refs instance_type_ref = models.InstanceTypes() instance_type_ref.update(values) instance_type_ref.save(session=session) except Exception, e: raise exception.DBError(e) return _dict_with_extra_specs(instance_type_ref) def _dict_with_extra_specs(inst_type_query): """Takes an instance, volume, or instance type query returned by sqlalchemy and returns it as a dictionary, converting the extra_specs entry from a list of dicts: 'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...] 
to a single dict: 'extra_specs' : {'k1': 'v1'} """ inst_type_dict = dict(inst_type_query) extra_specs = dict([(x['key'], x['value']) for x in inst_type_query['extra_specs']]) inst_type_dict['extra_specs'] = extra_specs return inst_type_dict def _instance_type_get_query(context, session=None, read_deleted=None): return model_query(context, models.InstanceTypes, session=session, read_deleted=read_deleted).\ options(joinedload('extra_specs')) @require_context def instance_type_get_all(context, inactive=False, filters=None): """ Returns all instance types. """ filters = filters or {} read_deleted = "yes" if inactive else "no" query = _instance_type_get_query(context, read_deleted=read_deleted) if 'min_memory_mb' in filters: query = query.filter( models.InstanceTypes.memory_mb >= filters['min_memory_mb']) if 'min_root_gb' in filters: query = query.filter( models.InstanceTypes.root_gb >= filters['min_root_gb']) inst_types = query.order_by("name").all() return [_dict_with_extra_specs(i) for i in inst_types] @require_context def instance_type_get(context, id, session=None): """Returns a dict describing specific instance_type""" result = _instance_type_get_query(context, session=session).\ filter_by(id=id).\ first() if not result: raise exception.InstanceTypeNotFound(instance_type_id=id) return _dict_with_extra_specs(result) @require_context def instance_type_get_by_name(context, name, session=None): """Returns a dict describing specific instance_type""" result = _instance_type_get_query(context, session=session).\ filter_by(name=name).\ first() if not result: raise exception.InstanceTypeNotFoundByName(instance_type_name=name) return _dict_with_extra_specs(result) @require_context def instance_type_get_by_flavor_id(context, flavor_id, session=None): """Returns a dict describing specific flavor_id""" result = _instance_type_get_query(context, session=session).\ filter_by(flavorid=flavor_id).\ first() if not result: raise exception.FlavorNotFound(flavor_id=flavor_id) return _dict_with_extra_specs(result) @require_admin_context def instance_type_destroy(context, name): """Marks specific instance_type as deleted""" session = get_session() with session.begin(): instance_type_ref = instance_type_get_by_name(context, name, session=session) instance_type_id = instance_type_ref['id'] session.query(models.InstanceTypes).\ filter_by(id=instance_type_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.InstanceTypeExtraSpecs).\ filter_by(instance_type_id=instance_type_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) #################### @require_admin_context def cell_create(context, values): cell = models.Cell() cell.update(values) cell.save() return cell def _cell_get_by_id_query(context, cell_id, session=None): return model_query(context, models.Cell, session=session).\ filter_by(id=cell_id) @require_admin_context def cell_update(context, cell_id, values): cell = cell_get(context, cell_id) cell.update(values) cell.save() return cell @require_admin_context def cell_delete(context, cell_id): session = get_session() with session.begin(): _cell_get_by_id_query(context, cell_id, session=session).\ delete() @require_admin_context def cell_get(context, cell_id): result = _cell_get_by_id_query(context, cell_id).first() if not result: raise exception.CellNotFound(cell_id=cell_id) return result @require_admin_context def cell_get_all(context): return model_query(context, models.Cell, 
read_deleted="no").all() #################### def _instance_metadata_get_query(context, instance_id, session=None): return model_query(context, models.InstanceMetadata, session=session, read_deleted="no").\ filter_by(instance_id=instance_id) @require_context @require_instance_exists def instance_metadata_get(context, instance_id): rows = _instance_metadata_get_query(context, instance_id).all() result = {} for row in rows: result[row['key']] = row['value'] return result @require_context @require_instance_exists def instance_metadata_delete(context, instance_id, key): _instance_metadata_get_query(context, instance_id).\ filter_by(key=key).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context @require_instance_exists def instance_metadata_get_item(context, instance_id, key, session=None): result = _instance_metadata_get_query( context, instance_id, session=session).\ filter_by(key=key).\ first() if not result: raise exception.InstanceMetadataNotFound(metadata_key=key, instance_id=instance_id) return result @require_context @require_instance_exists def instance_metadata_update(context, instance_id, metadata, delete): session = get_session() # Set existing metadata to deleted if delete argument is True if delete: original_metadata = instance_metadata_get(context, instance_id) for meta_key, meta_value in original_metadata.iteritems(): if meta_key not in metadata: meta_ref = instance_metadata_get_item(context, instance_id, meta_key, session) meta_ref.update({'deleted': True}) meta_ref.save(session=session) meta_ref = None # Now update all existing items with new values, or create new meta objects for meta_key, meta_value in metadata.iteritems(): # update the value whether it exists or not item = {"value": meta_value} try: meta_ref = instance_metadata_get_item(context, instance_id, meta_key, session) except exception.InstanceMetadataNotFound, e: meta_ref = models.InstanceMetadata() item.update({"key": meta_key, "instance_id": instance_id}) meta_ref.update(item) meta_ref.save(session=session) return metadata #################### @require_admin_context def agent_build_create(context, values): agent_build_ref = models.AgentBuild() agent_build_ref.update(values) agent_build_ref.save() return agent_build_ref @require_admin_context def agent_build_get_by_triple(context, hypervisor, os, architecture, session=None): return model_query(context, models.AgentBuild, session=session, read_deleted="no").\ filter_by(hypervisor=hypervisor).\ filter_by(os=os).\ filter_by(architecture=architecture).\ first() @require_admin_context def agent_build_get_all(context): return model_query(context, models.AgentBuild, read_deleted="no").\ all() @require_admin_context def agent_build_destroy(context, agent_build_id): session = get_session() with session.begin(): model_query(context, models.AgentBuild, session=session, read_deleted="yes").\ filter_by(id=agent_build_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_admin_context def agent_build_update(context, agent_build_id, values): session = get_session() with session.begin(): agent_build_ref = model_query(context, models.AgentBuild, session=session, read_deleted="yes").\ filter_by(id=agent_build_id).\ first() agent_build_ref.update(values) agent_build_ref.save(session=session) #################### @require_context def bw_usage_get_by_macs(context, macs, start_period): return model_query(context, models.BandwidthUsage, read_deleted="yes").\ 
filter(models.BandwidthUsage.mac.in_(macs)).\ filter_by(start_period=start_period).\ all() @require_context def bw_usage_update(context, mac, start_period, bw_in, bw_out, session=None): if not session: session = get_session() with session.begin(): bwusage = model_query(context, models.BandwidthUsage, session=session, read_deleted="yes").\ filter_by(start_period=start_period).\ filter_by(mac=mac).\ first() if not bwusage: bwusage = models.BandwidthUsage() bwusage.start_period = start_period bwusage.mac = mac bwusage.last_refreshed = utils.utcnow() bwusage.bw_in = bw_in bwusage.bw_out = bw_out bwusage.save(session=session) #################### def _instance_type_extra_specs_get_query(context, instance_type_id, session=None): return model_query(context, models.InstanceTypeExtraSpecs, session=session, read_deleted="no").\ filter_by(instance_type_id=instance_type_id) @require_context def instance_type_extra_specs_get(context, instance_type_id): rows = _instance_type_extra_specs_get_query( context, instance_type_id).\ all() result = {} for row in rows: result[row['key']] = row['value'] return result @require_context def instance_type_extra_specs_delete(context, instance_type_id, key): _instance_type_extra_specs_get_query( context, instance_type_id).\ filter_by(key=key).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def instance_type_extra_specs_get_item(context, instance_type_id, key, session=None): result = _instance_type_extra_specs_get_query( context, instance_type_id, session=session).\ filter_by(key=key).\ first() if not result: raise exception.InstanceTypeExtraSpecsNotFound( extra_specs_key=key, instance_type_id=instance_type_id) return result @require_context def instance_type_extra_specs_update_or_create(context, instance_type_id, specs): session = get_session() spec_ref = None for key, value in specs.iteritems(): try: spec_ref = instance_type_extra_specs_get_item( context, instance_type_id, key, session) except exception.InstanceTypeExtraSpecsNotFound, e: spec_ref = models.InstanceTypeExtraSpecs() spec_ref.update({"key": key, "value": value, "instance_type_id": instance_type_id, "deleted": 0}) spec_ref.save(session=session) return specs ################## @require_admin_context def volume_type_create(context, values): """Create a new instance type. In order to pass in extra specs, the values dict should contain a 'extra_specs' key/value pair: {'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}} """ session = get_session() with session.begin(): try: volume_type_get_by_name(context, values['name'], session) raise exception.VolumeTypeExists(name=values['name']) except exception.VolumeTypeNotFoundByName: pass try: specs = values.get('extra_specs') values['extra_specs'] = _metadata_refs(values.get('extra_specs'), models.VolumeTypeExtraSpecs) volume_type_ref = models.VolumeTypes() volume_type_ref.update(values) volume_type_ref.save() except Exception, e: raise exception.DBError(e) return volume_type_ref @require_context def volume_type_get_all(context, inactive=False, filters=None): """ Returns a dict describing all volume_types with name as key. 
""" filters = filters or {} read_deleted = "yes" if inactive else "no" rows = model_query(context, models.VolumeTypes, read_deleted=read_deleted).\ options(joinedload('extra_specs')).\ order_by("name").\ all() # TODO(sirp): this patern of converting rows to a result with extra_specs # is repeated quite a bit, might be worth creating a method for it result = {} for row in rows: result[row['name']] = _dict_with_extra_specs(row) return result @require_context def volume_type_get(context, id, session=None): """Returns a dict describing specific volume_type""" result = model_query(context, models.VolumeTypes, session=session).\ options(joinedload('extra_specs')).\ filter_by(id=id).\ first() if not result: raise exception.VolumeTypeNotFound(volume_type=id) return _dict_with_extra_specs(result) @require_context def volume_type_get_by_name(context, name, session=None): """Returns a dict describing specific volume_type""" result = model_query(context, models.VolumeTypes, session=session).\ options(joinedload('extra_specs')).\ filter_by(name=name).\ first() if not result: raise exception.VolumeTypeNotFoundByName(volume_type_name=name) else: return _dict_with_extra_specs(result) @require_admin_context def volume_type_destroy(context, name): session = get_session() with session.begin(): volume_type_ref = volume_type_get_by_name(context, name, session=session) volume_type_id = volume_type_ref['id'] session.query(models.VolumeTypes).\ filter_by(id=volume_type_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.VolumeTypeExtraSpecs).\ filter_by(volume_type_id=volume_type_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) #################### def _volume_type_extra_specs_query(context, volume_type_id, session=None): return model_query(context, models.VolumeTypeExtraSpecs, session=session, read_deleted="no").\ filter_by(volume_type_id=volume_type_id) @require_context def volume_type_extra_specs_get(context, volume_type_id): rows = _volume_type_extra_specs_query(context, volume_type_id).\ all() result = {} for row in rows: result[row['key']] = row['value'] return result @require_context def volume_type_extra_specs_delete(context, volume_type_id, key): _volume_type_extra_specs_query(context, volume_type_id).\ filter_by(key=key).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def volume_type_extra_specs_get_item(context, volume_type_id, key, session=None): result = _volume_type_extra_specs_query( context, volume_type_id, session=session).\ filter_by(key=key).\ first() if not result: raise exception.VolumeTypeExtraSpecsNotFound( extra_specs_key=key, volume_type_id=volume_type_id) return result @require_context def volume_type_extra_specs_update_or_create(context, volume_type_id, specs): session = get_session() spec_ref = None for key, value in specs.iteritems(): try: spec_ref = volume_type_extra_specs_get_item( context, volume_type_id, key, session) except exception.VolumeTypeExtraSpecsNotFound, e: spec_ref = models.VolumeTypeExtraSpecs() spec_ref.update({"key": key, "value": value, "volume_type_id": volume_type_id, "deleted": 0}) spec_ref.save(session=session) return specs #################### def s3_image_get(context, image_id): """Find local s3 image represented by the provided id""" result = model_query(context, models.S3Image, read_deleted="yes").\ filter_by(id=image_id).\ first() if not result: raise 
exception.ImageNotFound(image_id=image_id) return result def s3_image_get_by_uuid(context, image_uuid): """Find local s3 image represented by the provided uuid""" result = model_query(context, models.S3Image, read_deleted="yes").\ filter_by(uuid=image_uuid).\ first() if not result: raise exception.ImageNotFound(image_id=image_uuid) return result def s3_image_create(context, image_uuid): """Create local s3 image represented by provided uuid""" try: s3_image_ref = models.S3Image() s3_image_ref.update({'uuid': image_uuid}) s3_image_ref.save() except Exception, e: raise exception.DBError(e) return s3_image_ref #################### @require_admin_context def sm_backend_conf_create(context, values): backend_conf = models.SMBackendConf() backend_conf.update(values) backend_conf.save() return backend_conf @require_admin_context def sm_backend_conf_update(context, sm_backend_id, values): session = get_session() with session.begin(): backend_conf = model_query(context, models.SMBackendConf, session=session, read_deleted="yes").\ filter_by(id=sm_backend_id).\ first() if not backend_conf: raise exception.NotFound( _("No backend config with id %(sm_backend_id)s") % locals()) backend_conf.update(values) backend_conf.save(session=session) return backend_conf @require_admin_context def sm_backend_conf_delete(context, sm_backend_id): # FIXME(sirp): for consistency, shouldn't this just mark as deleted with # `purge` actually deleting the record? session = get_session() with session.begin(): model_query(context, models.SMBackendConf, session=session, read_deleted="yes").\ filter_by(id=sm_backend_id).\ delete() @require_admin_context def sm_backend_conf_get(context, sm_backend_id): result = model_query(context, models.SMBackendConf, read_deleted="yes").\ filter_by(id=sm_backend_id).\ first() if not result: raise exception.NotFound(_("No backend config with id " "%(sm_backend_id)s") % locals()) return result @require_admin_context def sm_backend_conf_get_by_sr(context, sr_uuid): session = get_session() return model_query(context, models.SMBackendConf, read_deleted="yes").\ filter_by(sr_uuid=sr_uuid).\ first() @require_admin_context def sm_backend_conf_get_all(context): return model_query(context, models.SMBackendConf, read_deleted="yes").\ all() #################### def _sm_flavor_get_query(context, sm_flavor_label, session=None): return model_query(context, models.SMFlavors, session=session, read_deleted="yes").\ filter_by(label=sm_flavor_label) @require_admin_context def sm_flavor_create(context, values): sm_flavor = models.SMFlavors() sm_flavor.update(values) sm_flavor.save() return sm_flavor @require_admin_context def sm_flavor_update(context, sm_flavor_label, values): sm_flavor = sm_flavor_get(context, sm_flavor_label) sm_flavor.update(values) sm_flavor.save() return sm_flavor @require_admin_context def sm_flavor_delete(context, sm_flavor_label): session = get_session() with session.begin(): _sm_flavor_get_query(context, sm_flavor_label).delete() @require_admin_context def sm_flavor_get(context, sm_flavor_label): result = _sm_flavor_get_query(context, sm_flavor_label).first() if not result: raise exception.NotFound( _("No sm_flavor called %(sm_flavor)s") % locals()) return result @require_admin_context def sm_flavor_get_all(context): return model_query(context, models.SMFlavors, read_deleted="yes").all() ############################### def _sm_volume_get_query(context, volume_id, session=None): return model_query(context, models.SMVolume, session=session, read_deleted="yes").\ filter_by(id=volume_id) def 
sm_volume_create(context, values): sm_volume = models.SMVolume() sm_volume.update(values) sm_volume.save() return sm_volume def sm_volume_update(context, volume_id, values): sm_volume = sm_volume_get(context, volume_id) sm_volume.update(values) sm_volume.save() return sm_volume def sm_volume_delete(context, volume_id): session = get_session() with session.begin(): _sm_volume_get_query(context, volume_id, session=session).delete() def sm_volume_get(context, volume_id): result = _sm_volume_get_query(context, volume_id).first() if not result: raise exception.NotFound( _("No sm_volume with id %(volume_id)s") % locals()) return result def sm_volume_get_all(context): return model_query(context, models.SMVolume, read_deleted="yes").all() ################ def _aggregate_get_query(context, model_class, id_field, id, session=None, read_deleted=None): return model_query(context, model_class, session=session, read_deleted=read_deleted).filter(id_field == id) @require_admin_context def aggregate_create(context, values, metadata=None): session = get_session() aggregate = _aggregate_get_query(context, models.Aggregate, models.Aggregate.name, values['name'], session=session, read_deleted='yes').first() values.setdefault('operational_state', aggregate_states.CREATED) if not aggregate: aggregate = models.Aggregate() aggregate.update(values) aggregate.save(session=session) elif aggregate.deleted: values['deleted'] = False values['deleted_at'] = None aggregate.update(values) aggregate.save(session=session) else: raise exception.AggregateNameExists(aggregate_name=values['name']) if metadata: aggregate_metadata_add(context, aggregate.id, metadata) return aggregate @require_admin_context def aggregate_get(context, aggregate_id): aggregate = _aggregate_get_query(context, models.Aggregate, models.Aggregate.id, aggregate_id).first() if not aggregate: raise exception.AggregateNotFound(aggregate_id=aggregate_id) return aggregate @require_admin_context def aggregate_get_by_host(context, host): aggregate_host = _aggregate_get_query(context, models.AggregateHost, models.AggregateHost.host, host).first() if not aggregate_host: raise exception.AggregateHostNotFound(host=host) return aggregate_get(context, aggregate_host.aggregate_id) @require_admin_context def aggregate_update(context, aggregate_id, values): session = get_session() aggregate = _aggregate_get_query(context, models.Aggregate, models.Aggregate.id, aggregate_id, session=session).first() if aggregate: metadata = values.get('metadata') if metadata is not None: aggregate_metadata_add(context, aggregate_id, values.pop('metadata'), set_delete=True) with session.begin(): aggregate.update(values) aggregate.save(session=session) values['metadata'] = metadata return aggregate else: raise exception.AggregateNotFound(aggregate_id=aggregate_id) @require_admin_context def aggregate_delete(context, aggregate_id): query = _aggregate_get_query(context, models.Aggregate, models.Aggregate.id, aggregate_id) if query.first(): query.update({'deleted': True, 'deleted_at': utils.utcnow(), 'operational_state': aggregate_states.DISMISSED, 'updated_at': literal_column('updated_at')}) else: raise exception.AggregateNotFound(aggregate_id=aggregate_id) @require_admin_context def aggregate_get_all(context): return model_query(context, models.Aggregate).all() @require_admin_context @require_aggregate_exists def aggregate_metadata_get(context, aggregate_id): rows = model_query(context, models.AggregateMetadata).\ filter_by(aggregate_id=aggregate_id).all() return dict([(r['key'], r['value']) 
for r in rows]) @require_admin_context @require_aggregate_exists def aggregate_metadata_delete(context, aggregate_id, key): query = _aggregate_get_query(context, models.AggregateMetadata, models.AggregateMetadata.aggregate_id, aggregate_id).\ filter_by(key=key) if query.first(): query.update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) else: raise exception.AggregateMetadataNotFound(aggregate_id=aggregate_id, metadata_key=key) @require_admin_context @require_aggregate_exists def aggregate_metadata_get_item(context, aggregate_id, key, session=None): result = _aggregate_get_query(context, models.AggregateMetadata, models.AggregateMetadata.aggregate_id, aggregate_id, session=session, read_deleted='yes').\ filter_by(key=key).first() if not result: raise exception.AggregateMetadataNotFound(metadata_key=key, aggregate_id=aggregate_id) return result @require_admin_context @require_aggregate_exists def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False): session = get_session() if set_delete: original_metadata = aggregate_metadata_get(context, aggregate_id) for meta_key, meta_value in original_metadata.iteritems(): if meta_key not in metadata: meta_ref = aggregate_metadata_get_item(context, aggregate_id, meta_key, session) meta_ref.update({'deleted': True}) meta_ref.save(session=session) meta_ref = None for meta_key, meta_value in metadata.iteritems(): item = {"value": meta_value} try: meta_ref = aggregate_metadata_get_item(context, aggregate_id, meta_key, session) if meta_ref.deleted: item.update({'deleted': False, 'deleted_at': None}) except exception.AggregateMetadataNotFound: meta_ref = models.AggregateMetadata() item.update({"key": meta_key, "aggregate_id": aggregate_id}) meta_ref.update(item) meta_ref.save(session=session) return metadata @require_admin_context @require_aggregate_exists def aggregate_host_get_all(context, aggregate_id): rows = model_query(context, models.AggregateHost).\ filter_by(aggregate_id=aggregate_id).all() return [r.host for r in rows] @require_admin_context @require_aggregate_exists def aggregate_host_delete(context, aggregate_id, host): query = _aggregate_get_query(context, models.AggregateHost, models.AggregateHost.aggregate_id, aggregate_id).filter_by(host=host) if query.first(): query.update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) else: raise exception.AggregateHostNotFound(aggregate_id=aggregate_id, host=host) @require_admin_context @require_aggregate_exists def aggregate_host_add(context, aggregate_id, host): session = get_session() host_ref = _aggregate_get_query(context, models.AggregateHost, models.AggregateHost.aggregate_id, aggregate_id, session=session, read_deleted='yes').\ filter_by(host=host).first() if not host_ref: try: host_ref = models.AggregateHost() values = {"host": host, "aggregate_id": aggregate_id, } host_ref.update(values) host_ref.save(session=session) except exception.DBError: raise exception.AggregateHostConflict(host=host) elif host_ref.deleted: host_ref.update({'deleted': False, 'deleted_at': None}) host_ref.save(session=session) else: raise exception.AggregateHostExists(host=host, aggregate_id=aggregate_id) return host_ref ################ def instance_fault_create(context, values): """Create a new InstanceFault.""" fault_ref = models.InstanceFault() fault_ref.update(values) fault_ref.save() return dict(fault_ref.iteritems()) def instance_fault_get_by_instance_uuids(context, instance_uuids): """Get all instance 
faults for the provided instance_uuids.""" rows = model_query(context, models.InstanceFault, read_deleted='no').\ filter(models.InstanceFault.instance_uuid.in_( instance_uuids)).\ order_by(desc("created_at")).\ all() output = {} for instance_uuid in instance_uuids: output[instance_uuid] = [] for row in rows: data = dict(row.iteritems()) output[row['instance_uuid']].append(data) return output
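
# A hedged, illustrative sketch (not part of the original module): the
# soft-delete idiom used above for instance types, volume types, aggregates
# and agent builds -- update({'deleted': True, 'deleted_at': utils.utcnow(),
# 'updated_at': literal_column('updated_at')}) -- recurs often enough that it
# could be factored out. `query` is assumed to be any SQLAlchemy query over a
# model with deleted/deleted_at/updated_at columns; the helper name is
# hypothetical, not part of the project's API.
def _soft_delete_rows_example(query):
    """Mark every row matched by `query` as deleted, reassigning updated_at
    to its own current value so the timestamp is left untouched."""
    return query.update({'deleted': True,
                         'deleted_at': utils.utcnow(),
                         'updated_at': literal_column('updated_at')})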
./CrossVul/dataset_final_sorted/CWE-264/py/bad_3632_3
crossvul-python_data_bad_1622_1
import argparse from binascii import hexlify from datetime import datetime from operator import attrgetter import functools import io import os import stat import sys import textwrap from attic import __version__ from attic.archive import Archive, ArchiveChecker from attic.repository import Repository from attic.cache import Cache from attic.key import key_creator from attic.helpers import Error, location_validator, format_time, \ format_file_mode, ExcludePattern, exclude_path, adjust_patterns, to_localtime, \ get_cache_dir, get_keys_dir, format_timedelta, prune_within, prune_split, \ Manifest, remove_surrogates, update_excludes, format_archive, check_extension_modules, Statistics, \ is_cachedir, bigint_to_int from attic.remote import RepositoryServer, RemoteRepository class Archiver: def __init__(self): self.exit_code = 0 def open_repository(self, location, create=False, exclusive=False): if location.proto == 'ssh': repository = RemoteRepository(location, create=create) else: repository = Repository(location.path, create=create, exclusive=exclusive) repository._location = location return repository def print_error(self, msg, *args): msg = args and msg % args or msg self.exit_code = 1 print('attic: ' + msg, file=sys.stderr) def print_verbose(self, msg, *args, **kw): if self.verbose: msg = args and msg % args or msg if kw.get('newline', True): print(msg) else: print(msg, end=' ') def do_serve(self, args): """Start Attic in server mode. This command is usually not used manually. """ return RepositoryServer(restrict_to_paths=args.restrict_to_paths).serve() def do_init(self, args): """Initialize an empty repository""" print('Initializing repository at "%s"' % args.repository.orig) repository = self.open_repository(args.repository, create=True, exclusive=True) key = key_creator(repository, args) manifest = Manifest(key, repository) manifest.key = key manifest.write() repository.commit() return self.exit_code def do_check(self, args): """Check repository consistency""" repository = self.open_repository(args.repository, exclusive=args.repair) if args.repair: while not os.environ.get('ATTIC_CHECK_I_KNOW_WHAT_I_AM_DOING'): self.print_error("""Warning: 'check --repair' is an experimental feature that might result in data loss. Type "Yes I am sure" if you understand this and want to continue.\n""") if input('Do you want to continue? 
') == 'Yes I am sure': break if not args.archives_only: print('Starting repository check...') if repository.check(repair=args.repair): print('Repository check complete, no problems found.') else: return 1 if not args.repo_only and not ArchiveChecker().check(repository, repair=args.repair): return 1 return 0 def do_change_passphrase(self, args): """Change repository key file passphrase""" repository = self.open_repository(args.repository) manifest, key = Manifest.load(repository) key.change_passphrase() return 0 def do_create(self, args): """Create new archive""" t0 = datetime.now() repository = self.open_repository(args.archive, exclusive=True) manifest, key = Manifest.load(repository) cache = Cache(repository, key, manifest) archive = Archive(repository, key, manifest, args.archive.archive, cache=cache, create=True, checkpoint_interval=args.checkpoint_interval, numeric_owner=args.numeric_owner) # Add Attic cache dir to inode_skip list skip_inodes = set() try: st = os.stat(get_cache_dir()) skip_inodes.add((st.st_ino, st.st_dev)) except IOError: pass # Add local repository dir to inode_skip list if not args.archive.host: try: st = os.stat(args.archive.path) skip_inodes.add((st.st_ino, st.st_dev)) except IOError: pass for path in args.paths: path = os.path.normpath(path) if args.dontcross: try: restrict_dev = os.lstat(path).st_dev except OSError as e: self.print_error('%s: %s', path, e) continue else: restrict_dev = None self._process(archive, cache, args.excludes, args.exclude_caches, skip_inodes, path, restrict_dev) archive.save() if args.stats: t = datetime.now() diff = t - t0 print('-' * 78) print('Archive name: %s' % args.archive.archive) print('Archive fingerprint: %s' % hexlify(archive.id).decode('ascii')) print('Start time: %s' % t0.strftime('%c')) print('End time: %s' % t.strftime('%c')) print('Duration: %s' % format_timedelta(diff)) print('Number of files: %d' % archive.stats.nfiles) archive.stats.print_('This archive:', cache) print('-' * 78) return self.exit_code def _process(self, archive, cache, excludes, exclude_caches, skip_inodes, path, restrict_dev): if exclude_path(path, excludes): return try: st = os.lstat(path) except OSError as e: self.print_error('%s: %s', path, e) return if (st.st_ino, st.st_dev) in skip_inodes: return # Entering a new filesystem? 
if restrict_dev and st.st_dev != restrict_dev: return # Ignore unix sockets if stat.S_ISSOCK(st.st_mode): return self.print_verbose(remove_surrogates(path)) if stat.S_ISREG(st.st_mode): try: archive.process_file(path, st, cache) except IOError as e: self.print_error('%s: %s', path, e) elif stat.S_ISDIR(st.st_mode): if exclude_caches and is_cachedir(path): return archive.process_item(path, st) try: entries = os.listdir(path) except OSError as e: self.print_error('%s: %s', path, e) else: for filename in sorted(entries): self._process(archive, cache, excludes, exclude_caches, skip_inodes, os.path.join(path, filename), restrict_dev) elif stat.S_ISLNK(st.st_mode): archive.process_symlink(path, st) elif stat.S_ISFIFO(st.st_mode): archive.process_item(path, st) elif stat.S_ISCHR(st.st_mode) or stat.S_ISBLK(st.st_mode): archive.process_dev(path, st) else: self.print_error('Unknown file type: %s', path) def do_extract(self, args): """Extract archive contents""" # be restrictive when restoring files, restore permissions later os.umask(0o077) repository = self.open_repository(args.archive) manifest, key = Manifest.load(repository) archive = Archive(repository, key, manifest, args.archive.archive, numeric_owner=args.numeric_owner) patterns = adjust_patterns(args.paths, args.excludes) dry_run = args.dry_run strip_components = args.strip_components dirs = [] for item in archive.iter_items(lambda item: not exclude_path(item[b'path'], patterns), preload=True): orig_path = item[b'path'] if strip_components: item[b'path'] = os.sep.join(orig_path.split(os.sep)[strip_components:]) if not item[b'path']: continue if not args.dry_run: while dirs and not item[b'path'].startswith(dirs[-1][b'path']): archive.extract_item(dirs.pop(-1)) self.print_verbose(remove_surrogates(orig_path)) try: if dry_run: archive.extract_item(item, dry_run=True) else: if stat.S_ISDIR(item[b'mode']): dirs.append(item) archive.extract_item(item, restore_attrs=False) else: archive.extract_item(item) except IOError as e: self.print_error('%s: %s', remove_surrogates(orig_path), e) if not args.dry_run: while dirs: archive.extract_item(dirs.pop(-1)) return self.exit_code def do_delete(self, args): """Delete an existing archive""" repository = self.open_repository(args.archive, exclusive=True) manifest, key = Manifest.load(repository) cache = Cache(repository, key, manifest) archive = Archive(repository, key, manifest, args.archive.archive, cache=cache) stats = Statistics() archive.delete(stats) manifest.write() repository.commit() cache.commit() if args.stats: stats.print_('Deleted data:', cache) return self.exit_code def do_mount(self, args): """Mount archive or an entire repository as a FUSE fileystem""" try: from attic.fuse import AtticOperations except ImportError: self.print_error('the "llfuse" module is required to use this feature') return self.exit_code if not os.path.isdir(args.mountpoint) or not os.access(args.mountpoint, os.R_OK | os.W_OK | os.X_OK): self.print_error('%s: Mountpoint must be a writable directory' % args.mountpoint) return self.exit_code repository = self.open_repository(args.src) manifest, key = Manifest.load(repository) if args.src.archive: archive = Archive(repository, key, manifest, args.src.archive) else: archive = None operations = AtticOperations(key, repository, manifest, archive) self.print_verbose("Mounting filesystem") try: operations.mount(args.mountpoint, args.options, args.foreground) except RuntimeError: # Relevant error message already printed to stderr by fuse self.exit_code = 1 return self.exit_code 
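    # A hedged, illustrative helper (not in the original class): do_list()
    # below maps item[b'mode'] // 4096 through `tmap` to a one-character file
    # type. Dividing by 4096 equals stat.S_IFMT(mode) >> 12, i.e. it isolates
    # the file-type nibble of a stat mode word, so 0o10 is a regular file,
    # 0o12 a symlink and 0o14 a socket. The method name is hypothetical.
    @staticmethod
    def _mode_to_type_char_example(mode):
        tmap = {1: 'p', 2: 'c', 4: 'd', 6: 'b', 0o10: '-', 0o12: 'l', 0o14: 's'}
        return tmap.get(mode // 4096, '?')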
def do_list(self, args): """List archive or repository contents""" repository = self.open_repository(args.src) manifest, key = Manifest.load(repository) if args.src.archive: tmap = {1: 'p', 2: 'c', 4: 'd', 6: 'b', 0o10: '-', 0o12: 'l', 0o14: 's'} archive = Archive(repository, key, manifest, args.src.archive) for item in archive.iter_items(): type = tmap.get(item[b'mode'] // 4096, '?') mode = format_file_mode(item[b'mode']) size = 0 if type == '-': try: size = sum(size for _, size, _ in item[b'chunks']) except KeyError: pass mtime = format_time(datetime.fromtimestamp(bigint_to_int(item[b'mtime']) / 1e9)) if b'source' in item: if type == 'l': extra = ' -> %s' % item[b'source'] else: type = 'h' extra = ' link to %s' % item[b'source'] else: extra = '' print('%s%s %-6s %-6s %8d %s %s%s' % (type, mode, item[b'user'] or item[b'uid'], item[b'group'] or item[b'gid'], size, mtime, remove_surrogates(item[b'path']), extra)) else: for archive in sorted(Archive.list_archives(repository, key, manifest), key=attrgetter('ts')): print(format_archive(archive)) return self.exit_code def do_info(self, args): """Show archive details such as disk space used""" repository = self.open_repository(args.archive) manifest, key = Manifest.load(repository) cache = Cache(repository, key, manifest) archive = Archive(repository, key, manifest, args.archive.archive, cache=cache) stats = archive.calc_stats(cache) print('Name:', archive.name) print('Fingerprint: %s' % hexlify(archive.id).decode('ascii')) print('Hostname:', archive.metadata[b'hostname']) print('Username:', archive.metadata[b'username']) print('Time: %s' % to_localtime(archive.ts).strftime('%c')) print('Command line:', remove_surrogates(' '.join(archive.metadata[b'cmdline']))) print('Number of files: %d' % stats.nfiles) stats.print_('This archive:', cache) return self.exit_code def do_prune(self, args): """Prune repository archives according to specified rules""" repository = self.open_repository(args.repository, exclusive=True) manifest, key = Manifest.load(repository) cache = Cache(repository, key, manifest) archives = list(sorted(Archive.list_archives(repository, key, manifest, cache), key=attrgetter('ts'), reverse=True)) if args.hourly + args.daily + args.weekly + args.monthly + args.yearly == 0 and args.within is None: self.print_error('At least one of the "within", "hourly", "daily", "weekly", "monthly" or "yearly" ' 'settings must be specified') return 1 if args.prefix: archives = [archive for archive in archives if archive.name.startswith(args.prefix)] keep = [] if args.within: keep += prune_within(archives, args.within) if args.hourly: keep += prune_split(archives, '%Y-%m-%d %H', args.hourly, keep) if args.daily: keep += prune_split(archives, '%Y-%m-%d', args.daily, keep) if args.weekly: keep += prune_split(archives, '%G-%V', args.weekly, keep) if args.monthly: keep += prune_split(archives, '%Y-%m', args.monthly, keep) if args.yearly: keep += prune_split(archives, '%Y', args.yearly, keep) keep.sort(key=attrgetter('ts'), reverse=True) to_delete = [a for a in archives if a not in keep] stats = Statistics() for archive in keep: self.print_verbose('Keeping archive: %s' % format_archive(archive)) for archive in to_delete: if args.dry_run: self.print_verbose('Would prune: %s' % format_archive(archive)) else: self.print_verbose('Pruning archive: %s' % format_archive(archive)) archive.delete(stats) if to_delete and not args.dry_run: manifest.write() repository.commit() cache.commit() if args.stats: stats.print_('Deleted data:', cache) return self.exit_code 
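    # Added explanatory note (not in the original): in do_prune() above, the
    # running `keep` list is threaded through the successive prune_split()
    # calls as the skip argument. That is what implements the documented rule
    # that archives already selected by an earlier, finer-grained rule (e.g.
    # --keep-daily) do not consume slots in a later rule (e.g. --keep-weekly).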
helptext = {} helptext['patterns'] = ''' Exclude patterns use a variant of shell pattern syntax, with '*' matching any number of characters, '?' matching any single character, '[...]' matching any single character specified, including ranges, and '[!...]' matching any character not specified. For the purpose of these patterns, the path separator ('\\' for Windows and '/' on other systems) is not treated specially. For a path to match a pattern, it must completely match from start to end, or must match from the start to just before a path separator. Except for the root path, paths will never end in the path separator when matching is attempted. Thus, if a given pattern ends in a path separator, a '*' is appended before matching is attempted. Patterns with wildcards should be quoted to protect them from shell expansion. Examples: # Exclude '/home/user/file.o' but not '/home/user/file.odt': $ attic create -e '*.o' repo.attic / # Exclude '/home/user/junk' and '/home/user/subdir/junk' but # not '/home/user/importantjunk' or '/etc/junk': $ attic create -e '/home/*/junk' repo.attic / # Exclude the contents of '/home/user/cache' but not the directory itself: $ attic create -e /home/user/cache/ repo.attic / # The file '/home/user/cache/important' is *not* backed up: $ attic create -e /home/user/cache/ repo.attic / /home/user/cache/important ''' def do_help(self, parser, commands, args): if not args.topic: parser.print_help() elif args.topic in self.helptext: print(self.helptext[args.topic]) elif args.topic in commands: if args.epilog_only: print(commands[args.topic].epilog) elif args.usage_only: commands[args.topic].epilog = None commands[args.topic].print_help() else: commands[args.topic].print_help() else: parser.error('No help available on %s' % (args.topic,)) return self.exit_code def preprocess_args(self, args): deprecations = [ ('--hourly', '--keep-hourly', 'Warning: "--hourly" has been deprecated. Use "--keep-hourly" instead.'), ('--daily', '--keep-daily', 'Warning: "--daily" has been deprecated. Use "--keep-daily" instead.'), ('--weekly', '--keep-weekly', 'Warning: "--weekly" has been deprecated. Use "--keep-weekly" instead.'), ('--monthly', '--keep-monthly', 'Warning: "--monthly" has been deprecated. Use "--keep-monthly" instead.'), ('--yearly', '--keep-yearly', 'Warning: "--yearly" has been deprecated. Use "--keep-yearly" instead.') ] if args and args[0] == 'verify': print('Warning: "attic verify" has been deprecated. Use "attic extract --dry-run" instead.') args = ['extract', '--dry-run'] + args[1:] for i, arg in enumerate(args[:]): for old_name, new_name, warning in deprecations: if arg.startswith(old_name): args[i] = arg.replace(old_name, new_name) print(warning) return args def run(self, args=None): check_extension_modules() keys_dir = get_keys_dir() if not os.path.exists(keys_dir): os.makedirs(keys_dir) os.chmod(keys_dir, stat.S_IRWXU) cache_dir = get_cache_dir() if not os.path.exists(cache_dir): os.makedirs(cache_dir) os.chmod(cache_dir, stat.S_IRWXU) with open(os.path.join(cache_dir, 'CACHEDIR.TAG'), 'w') as fd: fd.write(textwrap.dedent(""" Signature: 8a477f597d28d172789f06886806bc55 # This file is a cache directory tag created by Attic. 
# For information about cache directory tags, see: # http://www.brynosaurus.com/cachedir/ """).lstrip()) common_parser = argparse.ArgumentParser(add_help=False) common_parser.add_argument('-v', '--verbose', dest='verbose', action='store_true', default=False, help='verbose output') # We can't use argparse for "serve" since we don't want it to show up in "Available commands" if args: args = self.preprocess_args(args) parser = argparse.ArgumentParser(description='Attic %s - Deduplicated Backups' % __version__) subparsers = parser.add_subparsers(title='Available commands') subparser = subparsers.add_parser('serve', parents=[common_parser], description=self.do_serve.__doc__) subparser.set_defaults(func=self.do_serve) subparser.add_argument('--restrict-to-path', dest='restrict_to_paths', action='append', metavar='PATH', help='restrict repository access to PATH') init_epilog = textwrap.dedent(""" This command initializes an empty repository. A repository is a filesystem directory containing the deduplicated data from zero or more archives. Encryption can be enabled at repository init time. """) subparser = subparsers.add_parser('init', parents=[common_parser], description=self.do_init.__doc__, epilog=init_epilog, formatter_class=argparse.RawDescriptionHelpFormatter) subparser.set_defaults(func=self.do_init) subparser.add_argument('repository', metavar='REPOSITORY', type=location_validator(archive=False), help='repository to create') subparser.add_argument('-e', '--encryption', dest='encryption', choices=('none', 'passphrase', 'keyfile'), default='none', help='select encryption method') check_epilog = textwrap.dedent(""" The check command verifies the consistency of a repository and the corresponding archives. The underlying repository data files are first checked to detect bit rot and other types of damage. After that the consistency and correctness of the archive metadata is verified. The archive metadata checks can be time consuming and requires access to the key file and/or passphrase if encryption is enabled. These checks can be skipped using the --repository-only option. """) subparser = subparsers.add_parser('check', parents=[common_parser], description=self.do_check.__doc__, epilog=check_epilog, formatter_class=argparse.RawDescriptionHelpFormatter) subparser.set_defaults(func=self.do_check) subparser.add_argument('repository', metavar='REPOSITORY', type=location_validator(archive=False), help='repository to check consistency of') subparser.add_argument('--repository-only', dest='repo_only', action='store_true', default=False, help='only perform repository checks') subparser.add_argument('--archives-only', dest='archives_only', action='store_true', default=False, help='only perform archives checks') subparser.add_argument('--repair', dest='repair', action='store_true', default=False, help='attempt to repair any inconsistencies found') change_passphrase_epilog = textwrap.dedent(""" The key files used for repository encryption are optionally passphrase protected. This command can be used to change this passphrase. 
""") subparser = subparsers.add_parser('change-passphrase', parents=[common_parser], description=self.do_change_passphrase.__doc__, epilog=change_passphrase_epilog, formatter_class=argparse.RawDescriptionHelpFormatter) subparser.set_defaults(func=self.do_change_passphrase) subparser.add_argument('repository', metavar='REPOSITORY', type=location_validator(archive=False)) create_epilog = textwrap.dedent(""" This command creates a backup archive containing all files found while recursively traversing all paths specified. The archive will consume almost no disk space for files or parts of files that have already been stored in other archives. See "attic help patterns" for more help on exclude patterns. """) subparser = subparsers.add_parser('create', parents=[common_parser], description=self.do_create.__doc__, epilog=create_epilog, formatter_class=argparse.RawDescriptionHelpFormatter) subparser.set_defaults(func=self.do_create) subparser.add_argument('-s', '--stats', dest='stats', action='store_true', default=False, help='print statistics for the created archive') subparser.add_argument('-e', '--exclude', dest='excludes', type=ExcludePattern, action='append', metavar="PATTERN", help='exclude paths matching PATTERN') subparser.add_argument('--exclude-from', dest='exclude_files', type=argparse.FileType('r'), action='append', metavar='EXCLUDEFILE', help='read exclude patterns from EXCLUDEFILE, one per line') subparser.add_argument('--exclude-caches', dest='exclude_caches', action='store_true', default=False, help='exclude directories that contain a CACHEDIR.TAG file (http://www.brynosaurus.com/cachedir/spec.html)') subparser.add_argument('-c', '--checkpoint-interval', dest='checkpoint_interval', type=int, default=300, metavar='SECONDS', help='write checkpoint every SECONDS seconds (Default: 300)') subparser.add_argument('--do-not-cross-mountpoints', dest='dontcross', action='store_true', default=False, help='do not cross mount points') subparser.add_argument('--numeric-owner', dest='numeric_owner', action='store_true', default=False, help='only store numeric user and group identifiers') subparser.add_argument('archive', metavar='ARCHIVE', type=location_validator(archive=True), help='archive to create') subparser.add_argument('paths', metavar='PATH', nargs='+', type=str, help='paths to archive') extract_epilog = textwrap.dedent(""" This command extracts the contents of an archive. By default the entire archive is extracted but a subset of files and directories can be selected by passing a list of ``PATHs`` as arguments. The file selection can further be restricted by using the ``--exclude`` option. See "attic help patterns" for more help on exclude patterns. 
""") subparser = subparsers.add_parser('extract', parents=[common_parser], description=self.do_extract.__doc__, epilog=extract_epilog, formatter_class=argparse.RawDescriptionHelpFormatter) subparser.set_defaults(func=self.do_extract) subparser.add_argument('-n', '--dry-run', dest='dry_run', default=False, action='store_true', help='do not actually change any files') subparser.add_argument('-e', '--exclude', dest='excludes', type=ExcludePattern, action='append', metavar="PATTERN", help='exclude paths matching PATTERN') subparser.add_argument('--exclude-from', dest='exclude_files', type=argparse.FileType('r'), action='append', metavar='EXCLUDEFILE', help='read exclude patterns from EXCLUDEFILE, one per line') subparser.add_argument('--numeric-owner', dest='numeric_owner', action='store_true', default=False, help='only obey numeric user and group identifiers') subparser.add_argument('--strip-components', dest='strip_components', type=int, default=0, metavar='NUMBER', help='Remove the specified number of leading path elements. Pathnames with fewer elements will be silently skipped.') subparser.add_argument('archive', metavar='ARCHIVE', type=location_validator(archive=True), help='archive to extract') subparser.add_argument('paths', metavar='PATH', nargs='*', type=str, help='paths to extract') delete_epilog = textwrap.dedent(""" This command deletes an archive from the repository. Any disk space not shared with any other existing archive is also reclaimed. """) subparser = subparsers.add_parser('delete', parents=[common_parser], description=self.do_delete.__doc__, epilog=delete_epilog, formatter_class=argparse.RawDescriptionHelpFormatter) subparser.set_defaults(func=self.do_delete) subparser.add_argument('-s', '--stats', dest='stats', action='store_true', default=False, help='print statistics for the deleted archive') subparser.add_argument('archive', metavar='ARCHIVE', type=location_validator(archive=True), help='archive to delete') list_epilog = textwrap.dedent(""" This command lists the contents of a repository or an archive. """) subparser = subparsers.add_parser('list', parents=[common_parser], description=self.do_list.__doc__, epilog=list_epilog, formatter_class=argparse.RawDescriptionHelpFormatter) subparser.set_defaults(func=self.do_list) subparser.add_argument('src', metavar='REPOSITORY_OR_ARCHIVE', type=location_validator(), help='repository/archive to list contents of') mount_epilog = textwrap.dedent(""" This command mounts an archive as a FUSE filesystem. This can be useful for browsing an archive or restoring individual files. Unless the ``--foreground`` option is given the command will run in the background until the filesystem is ``umounted``. """) subparser = subparsers.add_parser('mount', parents=[common_parser], description=self.do_mount.__doc__, epilog=mount_epilog, formatter_class=argparse.RawDescriptionHelpFormatter) subparser.set_defaults(func=self.do_mount) subparser.add_argument('src', metavar='REPOSITORY_OR_ARCHIVE', type=location_validator(), help='repository/archive to mount') subparser.add_argument('mountpoint', metavar='MOUNTPOINT', type=str, help='where to mount filesystem') subparser.add_argument('-f', '--foreground', dest='foreground', action='store_true', default=False, help='stay in foreground, do not daemonize') subparser.add_argument('-o', dest='options', type=str, help='Extra mount options') info_epilog = textwrap.dedent(""" This command displays some detailed information about the specified archive. 
""") subparser = subparsers.add_parser('info', parents=[common_parser], description=self.do_info.__doc__, epilog=info_epilog, formatter_class=argparse.RawDescriptionHelpFormatter) subparser.set_defaults(func=self.do_info) subparser.add_argument('archive', metavar='ARCHIVE', type=location_validator(archive=True), help='archive to display information about') prune_epilog = textwrap.dedent(""" The prune command prunes a repository by deleting archives not matching any of the specified retention options. This command is normally used by automated backup scripts wanting to keep a certain number of historic backups. As an example, "-d 7" means to keep the latest backup on each day for 7 days. Days without backups do not count towards the total. The rules are applied from hourly to yearly, and backups selected by previous rules do not count towards those of later rules. The time that each backup completes is used for pruning purposes. Dates and times are interpreted in the local timezone, and weeks go from Monday to Sunday. Specifying a negative number of archives to keep means that there is no limit. The "--keep-within" option takes an argument of the form "<int><char>", where char is "H", "d", "w", "m", "y". For example, "--keep-within 2d" means to keep all archives that were created within the past 48 hours. "1m" is taken to mean "31d". The archives kept with this option do not count towards the totals specified by any other options. If a prefix is set with -p, then only archives that start with the prefix are considered for deletion and only those archives count towards the totals specified by the rules. """) subparser = subparsers.add_parser('prune', parents=[common_parser], description=self.do_prune.__doc__, epilog=prune_epilog, formatter_class=argparse.RawDescriptionHelpFormatter) subparser.set_defaults(func=self.do_prune) subparser.add_argument('-n', '--dry-run', dest='dry_run', default=False, action='store_true', help='do not change repository') subparser.add_argument('-s', '--stats', dest='stats', action='store_true', default=False, help='print statistics for the deleted archive') subparser.add_argument('--keep-within', dest='within', type=str, metavar='WITHIN', help='keep all archives within this time interval') subparser.add_argument('-H', '--keep-hourly', dest='hourly', type=int, default=0, help='number of hourly archives to keep') subparser.add_argument('-d', '--keep-daily', dest='daily', type=int, default=0, help='number of daily archives to keep') subparser.add_argument('-w', '--keep-weekly', dest='weekly', type=int, default=0, help='number of weekly archives to keep') subparser.add_argument('-m', '--keep-monthly', dest='monthly', type=int, default=0, help='number of monthly archives to keep') subparser.add_argument('-y', '--keep-yearly', dest='yearly', type=int, default=0, help='number of yearly archives to keep') subparser.add_argument('-p', '--prefix', dest='prefix', type=str, help='only consider archive names starting with this prefix') subparser.add_argument('repository', metavar='REPOSITORY', type=location_validator(archive=False), help='repository to prune') subparser = subparsers.add_parser('help', parents=[common_parser], description='Extra help') subparser.add_argument('--epilog-only', dest='epilog_only', action='store_true', default=False) subparser.add_argument('--usage-only', dest='usage_only', action='store_true', default=False) subparser.set_defaults(func=functools.partial(self.do_help, parser, subparsers.choices)) subparser.add_argument('topic', metavar='TOPIC', 
                               type=str, nargs='?',
                               help='additional help on TOPIC')

        args = parser.parse_args(args or ['-h'])
        self.verbose = args.verbose
        update_excludes(args)
        return args.func(args)


def main():
    # Make sure stdout and stderr have errors='replace' to avoid unicode
    # issues when print()-ing unicode file names
    sys.stdout = io.TextIOWrapper(sys.stdout.buffer, sys.stdout.encoding, 'replace', line_buffering=True)
    sys.stderr = io.TextIOWrapper(sys.stderr.buffer, sys.stderr.encoding, 'replace', line_buffering=True)
    archiver = Archiver()
    try:
        exit_code = archiver.run(sys.argv[1:])
    except Error as e:
        archiver.print_error(e.get_message())
        exit_code = e.exit_code
    except KeyboardInterrupt:
        archiver.print_error('Error: Keyboard interrupt')
        exit_code = 1
    else:
        if exit_code:
            archiver.print_error('Exiting with failure status due to previous errors')
    sys.exit(exit_code)


if __name__ == '__main__':
    main()
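
# A hedged usage sketch (not part of the original module): Archiver.run()
# takes argv-style arguments, so the same commands the CLI accepts can be
# driven programmatically. The repository path below is a placeholder, not a
# real repository, and the function name is hypothetical.
def _example_list_archives(repo_path='/path/to/repo.attic'):
    archiver = Archiver()
    return archiver.run(['list', repo_path])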
./CrossVul/dataset_final_sorted/CWE-264/py/bad_1622_1
crossvul-python_data_bad_3692_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Main entry point into the Identity service.""" import uuid import urllib import urlparse from keystone import config from keystone import exception from keystone import policy from keystone import token from keystone.common import logging from keystone.common import manager from keystone.common import wsgi CONF = config.CONF LOG = logging.getLogger(__name__) class Manager(manager.Manager): """Default pivot point for the Identity backend. See :mod:`keystone.common.manager.Manager` for more details on how this dynamically calls the backend. """ def __init__(self): super(Manager, self).__init__(CONF.identity.driver) class Driver(object): """Interface description for an Identity driver.""" def authenticate(self, user_id=None, tenant_id=None, password=None): """Authenticate a given user, tenant and password. Returns: (user, tenant, metadata). """ raise exception.NotImplemented() def get_tenant(self, tenant_id): """Get a tenant by id. Returns: tenant_ref or None. """ raise exception.NotImplemented() def get_tenant_by_name(self, tenant_name): """Get a tenant by name. Returns: tenant_ref or None. """ raise exception.NotImplemented() def get_user(self, user_id): """Get a user by id. Returns: user_ref or None. """ raise exception.NotImplemented() def get_user_by_name(self, user_name): """Get a user by name. Returns: user_ref or None. """ raise exception.NotImplemented() def get_role(self, role_id): """Get a role by id. Returns: role_ref or None. """ raise exception.NotImplemented() def list_users(self): """List all users in the system. NOTE(termie): I'd prefer if this listed only the users for a given tenant. Returns: a list of user_refs or an empty list. """ raise exception.NotImplemented() def list_roles(self): """List all roles in the system. Returns: a list of role_refs or an empty list. """ raise exception.NotImplemented() # NOTE(termie): seven calls below should probably be exposed by the api # more clearly when the api redesign happens def add_user_to_tenant(self, tenant_id, user_id): raise exception.NotImplemented() def remove_user_from_tenant(self, tenant_id, user_id): raise exception.NotImplemented() def get_all_tenants(self): raise exception.NotImplemented() def get_tenants_for_user(self, user_id): """Get the tenants associated with a given user. Returns: a list of tenant ids. """ raise exception.NotImplemented() def get_roles_for_user_and_tenant(self, user_id, tenant_id): """Get the roles associated with a user within given tenant. Returns: a list of role ids. 
""" raise exception.NotImplemented() def add_role_to_user_and_tenant(self, user_id, tenant_id, role_id): """Add a role to a user within given tenant.""" raise exception.NotImplemented() def remove_role_from_user_and_tenant(self, user_id, tenant_id, role_id): """Remove a role from a user within given tenant.""" raise exception.NotImplemented() # user crud def create_user(self, user_id, user): raise exception.NotImplemented() def update_user(self, user_id, user): raise exception.NotImplemented() def delete_user(self, user_id): raise exception.NotImplemented() # tenant crud def create_tenant(self, tenant_id, tenant): raise exception.NotImplemented() def update_tenant(self, tenant_id, tenant): raise exception.NotImplemented() def delete_tenant(self, tenant_id, tenant): raise exception.NotImplemented() # metadata crud def get_metadata(self, user_id, tenant_id): raise exception.NotImplemented() def create_metadata(self, user_id, tenant_id, metadata): raise exception.NotImplemented() def update_metadata(self, user_id, tenant_id, metadata): raise exception.NotImplemented() def delete_metadata(self, user_id, tenant_id, metadata): raise exception.NotImplemented() # role crud def create_role(self, role_id, role): raise exception.NotImplemented() def update_role(self, role_id, role): raise exception.NotImplemented() def delete_role(self, role_id): raise exception.NotImplemented() class PublicRouter(wsgi.ComposableRouter): def add_routes(self, mapper): tenant_controller = TenantController() mapper.connect('/tenants', controller=tenant_controller, action='get_tenants_for_token', conditions=dict(methods=['GET'])) class AdminRouter(wsgi.ComposableRouter): def add_routes(self, mapper): # Tenant Operations tenant_controller = TenantController() mapper.connect('/tenants', controller=tenant_controller, action='get_all_tenants', conditions=dict(method=['GET'])) mapper.connect('/tenants/{tenant_id}', controller=tenant_controller, action='get_tenant', conditions=dict(method=['GET'])) # User Operations user_controller = UserController() mapper.connect('/users/{user_id}', controller=user_controller, action='get_user', conditions=dict(method=['GET'])) # Role Operations roles_controller = RoleController() mapper.connect('/tenants/{tenant_id}/users/{user_id}/roles', controller=roles_controller, action='get_user_roles', conditions=dict(method=['GET'])) mapper.connect('/users/{user_id}/roles', controller=user_controller, action='get_user_roles', conditions=dict(method=['GET'])) class TenantController(wsgi.Application): def __init__(self): self.identity_api = Manager() self.policy_api = policy.Manager() self.token_api = token.Manager() super(TenantController, self).__init__() def get_all_tenants(self, context, **kw): """Gets a list of all tenants for an admin user.""" self.assert_admin(context) tenant_refs = self.identity_api.get_tenants(context) params = { 'limit': context['query_string'].get('limit'), 'marker': context['query_string'].get('marker'), } return self._format_tenant_list(tenant_refs, **params) def get_tenants_for_token(self, context, **kw): """Get valid tenants for token based on token used to authenticate. Pulls the token from the context, validates it and gets the valid tenants for the user in the token. Doesn't care about token scopedness. 
""" try: token_ref = self.token_api.get_token(context=context, token_id=context['token_id']) except exception.NotFound: raise exception.Unauthorized() user_ref = token_ref['user'] tenant_ids = self.identity_api.get_tenants_for_user( context, user_ref['id']) tenant_refs = [] for tenant_id in tenant_ids: tenant_refs.append(self.identity_api.get_tenant( context=context, tenant_id=tenant_id)) params = { 'limit': context['query_string'].get('limit'), 'marker': context['query_string'].get('marker'), } return self._format_tenant_list(tenant_refs, **params) def get_tenant(self, context, tenant_id): # TODO(termie): this stuff should probably be moved to middleware self.assert_admin(context) tenant = self.identity_api.get_tenant(context, tenant_id) if tenant is None: raise exception.TenantNotFound(tenant_id=tenant_id) return {'tenant': tenant} # CRUD Extension def create_tenant(self, context, tenant): tenant_ref = self._normalize_dict(tenant) self.assert_admin(context) tenant_id = (tenant_ref.get('id') and tenant_ref.get('id') or uuid.uuid4().hex) tenant_ref['id'] = tenant_id tenant = self.identity_api.create_tenant( context, tenant_id, tenant_ref) return {'tenant': tenant} def update_tenant(self, context, tenant_id, tenant): self.assert_admin(context) if self.identity_api.get_tenant(context, tenant_id) is None: raise exception.TenantNotFound(tenant_id=tenant_id) tenant_ref = self.identity_api.update_tenant( context, tenant_id, tenant) return {'tenant': tenant_ref} def delete_tenant(self, context, tenant_id, **kw): self.assert_admin(context) if self.identity_api.get_tenant(context, tenant_id) is None: raise exception.TenantNotFound(tenant_id=tenant_id) self.identity_api.delete_tenant(context, tenant_id) def get_tenant_users(self, context, tenant_id, **kw): self.assert_admin(context) if self.identity_api.get_tenant(context, tenant_id) is None: raise exception.TenantNotFound(tenant_id=tenant_id) user_refs = self.identity_api.get_tenant_users(context, tenant_id) return {'users': user_refs} def _format_tenant_list(self, tenant_refs, **kwargs): marker = kwargs.get('marker') page_idx = 0 if marker is not None: for (marker_idx, tenant) in enumerate(tenant_refs): if tenant['id'] == marker: # we start pagination after the marker page_idx = marker_idx + 1 break else: msg = 'Marker could not be found' raise exception.ValidationError(message=msg) limit = kwargs.get('limit') if limit is not None: try: limit = int(limit) if limit < 0: raise AssertionError() except (ValueError, AssertionError): msg = 'Invalid limit value' raise exception.ValidationError(message=msg) tenant_refs = tenant_refs[page_idx:limit] for x in tenant_refs: if 'enabled' not in x: x['enabled'] = True o = {'tenants': tenant_refs, 'tenants_links': []} return o class UserController(wsgi.Application): def __init__(self): self.identity_api = Manager() self.policy_api = policy.Manager() self.token_api = token.Manager() super(UserController, self).__init__() def get_user(self, context, user_id): self.assert_admin(context) user_ref = self.identity_api.get_user(context, user_id) if not user_ref: raise exception.UserNotFound(user_id=user_id) return {'user': user_ref} def get_users(self, context): # NOTE(termie): i can't imagine that this really wants all the data # about every single user in the system... 
self.assert_admin(context) user_refs = self.identity_api.list_users(context) return {'users': user_refs} # CRUD extension def create_user(self, context, user): user = self._normalize_dict(user) self.assert_admin(context) tenant_id = user.get('tenantId', None) if (tenant_id is not None and self.identity_api.get_tenant(context, tenant_id) is None): raise exception.TenantNotFound(tenant_id=tenant_id) user_id = uuid.uuid4().hex user_ref = user.copy() user_ref['id'] = user_id new_user_ref = self.identity_api.create_user( context, user_id, user_ref) if tenant_id: self.identity_api.add_user_to_tenant(context, tenant_id, user_id) return {'user': new_user_ref} def update_user(self, context, user_id, user): # NOTE(termie): this is really more of a patch than a put self.assert_admin(context) if self.identity_api.get_user(context, user_id) is None: raise exception.UserNotFound(user_id=user_id) user_ref = self.identity_api.update_user(context, user_id, user) return {'user': user_ref} def delete_user(self, context, user_id): self.assert_admin(context) if self.identity_api.get_user(context, user_id) is None: raise exception.UserNotFound(user_id=user_id) self.identity_api.delete_user(context, user_id) def set_user_enabled(self, context, user_id, user): return self.update_user(context, user_id, user) def set_user_password(self, context, user_id, user): user_ref = self.update_user(context, user_id, user) try: for token_id in self.token_api.list_tokens(context, user_id): self.token_api.delete_token(context, token_id) except exception.NotImplemented: # The password has been changed but tokens remain valid for # backends that can't list tokens for users LOG.warning('Password changed for %s, but existing tokens remain ' 'valid' % user_id) return user_ref def update_user_tenant(self, context, user_id, user): """Update the default tenant.""" # ensure that we're a member of that tenant tenant_id = user.get('tenantId') self.identity_api.add_user_to_tenant(context, tenant_id, user_id) return self.update_user(context, user_id, user) class RoleController(wsgi.Application): def __init__(self): self.identity_api = Manager() self.token_api = token.Manager() self.policy_api = policy.Manager() super(RoleController, self).__init__() # COMPAT(essex-3) def get_user_roles(self, context, user_id, tenant_id=None): """Get the roles for a user and tenant pair. Since we're trying to ignore the idea of user-only roles we're not implementing them in hopes that the idea will die off. 
""" if tenant_id is None: raise exception.NotImplemented(message='User roles not supported: ' 'tenant ID required') user = self.identity_api.get_user(context, user_id) if user is None: raise exception.UserNotFound(user_id=user_id) tenant = self.identity_api.get_tenant(context, tenant_id) if tenant is None: raise exception.TenantNotFound(tenant_id=tenant_id) roles = self.identity_api.get_roles_for_user_and_tenant( context, user_id, tenant_id) return {'roles': [self.identity_api.get_role(context, x) for x in roles]} # CRUD extension def get_role(self, context, role_id): self.assert_admin(context) role_ref = self.identity_api.get_role(context, role_id) if not role_ref: raise exception.RoleNotFound(role_id=role_id) return {'role': role_ref} def create_role(self, context, role): role = self._normalize_dict(role) self.assert_admin(context) role_id = uuid.uuid4().hex role['id'] = role_id role_ref = self.identity_api.create_role(context, role_id, role) return {'role': role_ref} def delete_role(self, context, role_id): self.assert_admin(context) self.get_role(context, role_id) self.identity_api.delete_role(context, role_id) def get_roles(self, context): self.assert_admin(context) roles = self.identity_api.list_roles(context) # TODO(termie): probably inefficient at some point return {'roles': roles} def add_role_to_user(self, context, user_id, role_id, tenant_id=None): """Add a role to a user and tenant pair. Since we're trying to ignore the idea of user-only roles we're not implementing them in hopes that the idea will die off. """ self.assert_admin(context) if tenant_id is None: raise exception.NotImplemented(message='User roles not supported: ' 'tenant_id required') if self.identity_api.get_user(context, user_id) is None: raise exception.UserNotFound(user_id=user_id) if self.identity_api.get_tenant(context, tenant_id) is None: raise exception.TenantNotFound(tenant_id=tenant_id) if self.identity_api.get_role(context, role_id) is None: raise exception.RoleNotFound(role_id=role_id) # This still has the weird legacy semantics that adding a role to # a user also adds them to a tenant self.identity_api.add_user_to_tenant(context, tenant_id, user_id) self.identity_api.add_role_to_user_and_tenant( context, user_id, tenant_id, role_id) role_ref = self.identity_api.get_role(context, role_id) return {'role': role_ref} def remove_role_from_user(self, context, user_id, role_id, tenant_id=None): """Remove a role from a user and tenant pair. Since we're trying to ignore the idea of user-only roles we're not implementing them in hopes that the idea will die off. 
""" self.assert_admin(context) if tenant_id is None: raise exception.NotImplemented(message='User roles not supported: ' 'tenant_id required') if self.identity_api.get_user(context, user_id) is None: raise exception.UserNotFound(user_id=user_id) if self.identity_api.get_tenant(context, tenant_id) is None: raise exception.TenantNotFound(tenant_id=tenant_id) if self.identity_api.get_role(context, role_id) is None: raise exception.RoleNotFound(role_id=role_id) # This still has the weird legacy semantics that adding a role to # a user also adds them to a tenant, so we must follow up on that self.identity_api.remove_role_from_user_and_tenant( context, user_id, tenant_id, role_id) roles = self.identity_api.get_roles_for_user_and_tenant( context, user_id, tenant_id) if not roles: self.identity_api.remove_user_from_tenant( context, tenant_id, user_id) return # COMPAT(diablo): CRUD extension def get_role_refs(self, context, user_id): """Ultimate hack to get around having to make role_refs first-class. This will basically iterate over the various roles the user has in all tenants the user is a member of and create fake role_refs where the id encodes the user-tenant-role information so we can look up the appropriate data when we need to delete them. """ self.assert_admin(context) user_ref = self.identity_api.get_user(context, user_id) tenant_ids = self.identity_api.get_tenants_for_user(context, user_id) o = [] for tenant_id in tenant_ids: role_ids = self.identity_api.get_roles_for_user_and_tenant( context, user_id, tenant_id) for role_id in role_ids: ref = {'roleId': role_id, 'tenantId': tenant_id, 'userId': user_id} ref['id'] = urllib.urlencode(ref) o.append(ref) return {'roles': o} # COMPAT(diablo): CRUD extension def create_role_ref(self, context, user_id, role): """This is actually used for adding a user to a tenant. In the legacy data model adding a user to a tenant required setting a role. """ self.assert_admin(context) # TODO(termie): for now we're ignoring the actual role tenant_id = role.get('tenantId') role_id = role.get('roleId') self.identity_api.add_user_to_tenant(context, tenant_id, user_id) self.identity_api.add_role_to_user_and_tenant( context, user_id, tenant_id, role_id) role_ref = self.identity_api.get_role(context, role_id) return {'role': role_ref} # COMPAT(diablo): CRUD extension def delete_role_ref(self, context, user_id, role_ref_id): """This is actually used for deleting a user from a tenant. In the legacy data model removing a user from a tenant required deleting a role. To emulate this, we encode the tenant and role in the role_ref_id, and if this happens to be the last role for the user-tenant pair, we remove the user from the tenant. """ self.assert_admin(context) # TODO(termie): for now we're ignoring the actual role role_ref_ref = urlparse.parse_qs(role_ref_id) tenant_id = role_ref_ref.get('tenantId')[0] role_id = role_ref_ref.get('roleId')[0] self.identity_api.remove_role_from_user_and_tenant( context, user_id, tenant_id, role_id) roles = self.identity_api.get_roles_for_user_and_tenant( context, user_id, tenant_id) if not roles: self.identity_api.remove_user_from_tenant( context, tenant_id, user_id)
./CrossVul/dataset_final_sorted/CWE-264/py/bad_3692_0
crossvul-python_data_bad_4833_0
#!/usr/bin/env python2 # vim:fileencoding=utf-8 from __future__ import (unicode_literals, division, absolute_import, print_function) __license__ = 'GPL v3' __copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>' import time, textwrap, json from bisect import bisect_right from base64 import b64encode from future_builtins import map from threading import Thread from Queue import Queue, Empty from functools import partial from urlparse import urlparse from PyQt5.Qt import ( QWidget, QVBoxLayout, QApplication, QSize, QNetworkAccessManager, QMenu, QIcon, QNetworkReply, QTimer, QNetworkRequest, QUrl, Qt, QNetworkDiskCache, QToolBar, pyqtSlot, pyqtSignal) from PyQt5.QtWebKitWidgets import QWebView, QWebInspector, QWebPage from calibre import prints from calibre.constants import iswindows from calibre.ebooks.oeb.polish.parsing import parse from calibre.ebooks.oeb.base import serialize, OEB_DOCS from calibre.ptempfile import PersistentTemporaryDirectory from calibre.gui2 import error_dialog, open_url, NO_URL_FORMATTING from calibre.gui2.tweak_book import current_container, editors, tprefs, actions, TOP from calibre.gui2.viewer.documentview import apply_settings from calibre.gui2.viewer.config import config from calibre.gui2.widgets2 import HistoryLineEdit2 from calibre.utils.ipc.simple_worker import offload_worker shutdown = object() def get_data(name): 'Get the data for name. Returns a unicode string if name is a text document/stylesheet' if name in editors: return editors[name].get_raw_data() return current_container().raw_data(name) # Parsing of html to add linenumbers {{{ def parse_html(raw): root = parse(raw, decoder=lambda x:x.decode('utf-8'), line_numbers=True, linenumber_attribute='data-lnum') return serialize(root, 'text/html').encode('utf-8') class ParseItem(object): __slots__ = ('name', 'length', 'fingerprint', 'parsing_done', 'parsed_data') def __init__(self, name): self.name = name self.length, self.fingerprint = 0, None self.parsed_data = None self.parsing_done = False def __repr__(self): return 'ParsedItem(name=%r, length=%r, fingerprint=%r, parsing_done=%r, parsed_data_is_None=%r)' % ( self.name, self.length, self.fingerprint, self.parsing_done, self.parsed_data is None) class ParseWorker(Thread): daemon = True SLEEP_TIME = 1 def __init__(self): Thread.__init__(self) self.requests = Queue() self.request_count = 0 self.parse_items = {} self.launch_error = None def run(self): mod, func = 'calibre.gui2.tweak_book.preview', 'parse_html' try: # Connect to the worker and send a dummy job to initialize it self.worker = offload_worker(priority='low') self.worker(mod, func, '<p></p>') except: import traceback traceback.print_exc() self.launch_error = traceback.format_exc() return while True: time.sleep(self.SLEEP_TIME) x = self.requests.get() requests = [x] while True: try: requests.append(self.requests.get_nowait()) except Empty: break if shutdown in requests: self.worker.shutdown() break request = sorted(requests, reverse=True)[0] del requests pi, data = request[1:] try: res = self.worker(mod, func, data) except: import traceback traceback.print_exc() else: pi.parsing_done = True parsed_data = res['result'] if res['tb']: prints("Parser error:") prints(res['tb']) else: pi.parsed_data = parsed_data def add_request(self, name): data = get_data(name) ldata, hdata = len(data), hash(data) pi = self.parse_items.get(name, None) if pi is None: self.parse_items[name] = pi = ParseItem(name) else: if pi.parsing_done and pi.length == ldata and pi.fingerprint == hdata: return pi.parsed_data = None 
pi.parsing_done = False pi.length, pi.fingerprint = ldata, hdata self.requests.put((self.request_count, pi, data)) self.request_count += 1 def shutdown(self): self.requests.put(shutdown) def get_data(self, name): return getattr(self.parse_items.get(name, None), 'parsed_data', None) def clear(self): self.parse_items.clear() def is_alive(self): return Thread.is_alive(self) or (hasattr(self, 'worker') and self.worker.is_alive()) parse_worker = ParseWorker() # }}} # Override network access to load data "live" from the editors {{{ class NetworkReply(QNetworkReply): def __init__(self, parent, request, mime_type, name): QNetworkReply.__init__(self, parent) self.setOpenMode(QNetworkReply.ReadOnly | QNetworkReply.Unbuffered) self.setRequest(request) self.setUrl(request.url()) self._aborted = False if mime_type in OEB_DOCS: self.resource_name = name QTimer.singleShot(0, self.check_for_parse) else: data = get_data(name) if isinstance(data, type('')): data = data.encode('utf-8') mime_type += '; charset=utf-8' self.__data = data self.setHeader(QNetworkRequest.ContentTypeHeader, mime_type) self.setHeader(QNetworkRequest.ContentLengthHeader, len(self.__data)) QTimer.singleShot(0, self.finalize_reply) def check_for_parse(self): if self._aborted: return data = parse_worker.get_data(self.resource_name) if data is None: return QTimer.singleShot(10, self.check_for_parse) self.__data = data self.setHeader(QNetworkRequest.ContentTypeHeader, 'application/xhtml+xml; charset=utf-8') self.setHeader(QNetworkRequest.ContentLengthHeader, len(self.__data)) self.finalize_reply() def bytesAvailable(self): try: return len(self.__data) except AttributeError: return 0 def isSequential(self): return True def abort(self): self._aborted = True def readData(self, maxlen): ans, self.__data = self.__data[:maxlen], self.__data[maxlen:] return ans read = readData def finalize_reply(self): if self._aborted: return self.setFinished(True) self.setAttribute(QNetworkRequest.HttpStatusCodeAttribute, 200) self.setAttribute(QNetworkRequest.HttpReasonPhraseAttribute, "Ok") self.metaDataChanged.emit() self.downloadProgress.emit(len(self.__data), len(self.__data)) self.readyRead.emit() self.finished.emit() class NetworkAccessManager(QNetworkAccessManager): OPERATION_NAMES = {getattr(QNetworkAccessManager, '%sOperation'%x) : x.upper() for x in ('Head', 'Get', 'Put', 'Post', 'Delete', 'Custom') } def __init__(self, *args): QNetworkAccessManager.__init__(self, *args) self.current_root = None self.cache = QNetworkDiskCache(self) self.setCache(self.cache) self.cache.setCacheDirectory(PersistentTemporaryDirectory(prefix='disk_cache_')) self.cache.setMaximumCacheSize(0) def createRequest(self, operation, request, data): url = unicode(request.url().toString(NO_URL_FORMATTING)) if operation == self.GetOperation and url.startswith('file://'): path = url[7:] if iswindows and path.startswith('/'): path = path[1:] c = current_container() try: name = c.abspath_to_name(path, root=self.current_root) except ValueError: # Happens on windows with absolute paths on different drives name = None if c.has_name(name): try: return NetworkReply(self, request, c.mime_map.get(name, 'application/octet-stream'), name) except Exception: import traceback traceback.print_exc() return QNetworkAccessManager.createRequest(self, operation, request, data) # }}} def uniq(vals): ''' Remove all duplicates from vals, while preserving order. 
''' vals = vals or () seen = set() seen_add = seen.add return tuple(x for x in vals if x not in seen and not seen_add(x)) def find_le(a, x): 'Find rightmost value in a less than or equal to x' try: return a[bisect_right(a, x)] except IndexError: return a[-1] class WebPage(QWebPage): sync_requested = pyqtSignal(object, object, object) split_requested = pyqtSignal(object, object) def __init__(self, parent): QWebPage.__init__(self, parent) settings = self.settings() apply_settings(settings, config().parse()) settings.setMaximumPagesInCache(0) settings.setAttribute(settings.JavaEnabled, False) settings.setAttribute(settings.PluginsEnabled, False) settings.setAttribute(settings.PrivateBrowsingEnabled, True) settings.setAttribute(settings.JavascriptCanOpenWindows, False) settings.setAttribute(settings.JavascriptCanAccessClipboard, False) settings.setAttribute(settings.LinksIncludedInFocusChain, False) settings.setAttribute(settings.DeveloperExtrasEnabled, True) settings.setDefaultTextEncoding('utf-8') data = 'data:text/css;charset=utf-8;base64,' css = '[data-in-split-mode="1"] [data-is-block="1"]:hover { cursor: pointer !important; border-top: solid 5px green !important }' data += b64encode(css.encode('utf-8')) settings.setUserStyleSheetUrl(QUrl(data)) self.setNetworkAccessManager(NetworkAccessManager(self)) self.setLinkDelegationPolicy(self.DelegateAllLinks) self.mainFrame().javaScriptWindowObjectCleared.connect(self.init_javascript) self.init_javascript() @dynamic_property def current_root(self): def fget(self): return self.networkAccessManager().current_root def fset(self, val): self.networkAccessManager().current_root = val return property(fget=fget, fset=fset) def javaScriptConsoleMessage(self, msg, lineno, source_id): prints('preview js:%s:%s:'%(unicode(source_id), lineno), unicode(msg)) def init_javascript(self): if not hasattr(self, 'js'): from calibre.utils.resources import compiled_coffeescript self.js = compiled_coffeescript('ebooks.oeb.display.utils', dynamic=False) self.js += P('csscolorparser.js', data=True, allow_user_override=False) self.js += compiled_coffeescript('ebooks.oeb.polish.preview', dynamic=False) self._line_numbers = None mf = self.mainFrame() mf.addToJavaScriptWindowObject("py_bridge", self) mf.evaluateJavaScript(self.js) @pyqtSlot(str, str, str) def request_sync(self, tag_name, href, sourceline_address): try: self.sync_requested.emit(unicode(tag_name), unicode(href), json.loads(unicode(sourceline_address))) except (TypeError, ValueError, OverflowError, AttributeError): pass def go_to_anchor(self, anchor, lnum): self.mainFrame().evaluateJavaScript('window.calibre_preview_integration.go_to_anchor(%s, %s)' % ( json.dumps(anchor), json.dumps(str(lnum)))) @pyqtSlot(str, str) def request_split(self, loc, totals): actions['split-in-preview'].setChecked(False) loc, totals = json.loads(unicode(loc)), json.loads(unicode(totals)) if not loc or not totals: return error_dialog(self.view(), _('Invalid location'), _('Cannot split on the body tag'), show=True) self.split_requested.emit(loc, totals) @property def line_numbers(self): if self._line_numbers is None: def atoi(x): try: ans = int(x) except (TypeError, ValueError): ans = None return ans val = self.mainFrame().evaluateJavaScript('window.calibre_preview_integration.line_numbers()') self._line_numbers = sorted(uniq(filter(lambda x:x is not None, map(atoi, val)))) return self._line_numbers def go_to_line(self, lnum): try: lnum = find_le(self.line_numbers, lnum) except IndexError: return self.mainFrame().evaluateJavaScript( 
'window.calibre_preview_integration.go_to_line(%d)' % lnum) def go_to_sourceline_address(self, sourceline_address): lnum, tags = sourceline_address if lnum is None: return tags = [x.lower() for x in tags] self.mainFrame().evaluateJavaScript( 'window.calibre_preview_integration.go_to_sourceline_address(%d, %s)' % (lnum, json.dumps(tags))) def split_mode(self, enabled): self.mainFrame().evaluateJavaScript( 'window.calibre_preview_integration.split_mode(%s)' % ( 'true' if enabled else 'false')) class WebView(QWebView): def __init__(self, parent=None): QWebView.__init__(self, parent) self.inspector = QWebInspector(self) w = QApplication.instance().desktop().availableGeometry(self).width() self._size_hint = QSize(int(w/3), int(w/2)) self._page = WebPage(self) self.setPage(self._page) self.inspector.setPage(self._page) self.clear() self.setAcceptDrops(False) def sizeHint(self): return self._size_hint def refresh(self): self.pageAction(self.page().Reload).trigger() @dynamic_property def scroll_pos(self): def fget(self): mf = self.page().mainFrame() return (mf.scrollBarValue(Qt.Horizontal), mf.scrollBarValue(Qt.Vertical)) def fset(self, val): mf = self.page().mainFrame() mf.setScrollBarValue(Qt.Horizontal, val[0]) mf.setScrollBarValue(Qt.Vertical, val[1]) return property(fget=fget, fset=fset) def clear(self): self.setHtml(_( ''' <h3>Live preview</h3> <p>Here you will see a live preview of the HTML file you are currently editing. The preview will update automatically as you make changes. <p style="font-size:x-small; color: gray">Note that this is a quick preview only, it is not intended to simulate an actual ebook reader. Some aspects of your ebook will not work, such as page breaks and page margins. ''')) self.page().current_root = None def setUrl(self, qurl): self.page().current_root = current_container().root return QWebView.setUrl(self, qurl) def inspect(self): self.inspector.parent().show() self.inspector.parent().raise_() self.pageAction(self.page().InspectElement).trigger() def contextMenuEvent(self, ev): menu = QMenu(self) p = self.page() mf = p.mainFrame() r = mf.hitTestContent(ev.pos()) url = unicode(r.linkUrl().toString(NO_URL_FORMATTING)).strip() ca = self.pageAction(QWebPage.Copy) if ca.isEnabled(): menu.addAction(ca) menu.addAction(actions['reload-preview']) menu.addAction(QIcon(I('debug.png')), _('Inspect element'), self.inspect) if url.partition(':')[0].lower() in {'http', 'https'}: menu.addAction(_('Open link'), partial(open_url, r.linkUrl())) menu.exec_(ev.globalPos()) class Preview(QWidget): sync_requested = pyqtSignal(object, object) split_requested = pyqtSignal(object, object, object) split_start_requested = pyqtSignal() link_clicked = pyqtSignal(object, object) refresh_starting = pyqtSignal() refreshed = pyqtSignal() def __init__(self, parent=None): QWidget.__init__(self, parent) self.l = l = QVBoxLayout() self.setLayout(l) l.setContentsMargins(0, 0, 0, 0) self.view = WebView(self) self.view.page().sync_requested.connect(self.request_sync) self.view.page().split_requested.connect(self.request_split) self.view.page().loadFinished.connect(self.load_finished) self.inspector = self.view.inspector self.inspector.setPage(self.view.page()) l.addWidget(self.view) self.bar = QToolBar(self) l.addWidget(self.bar) ac = actions['auto-reload-preview'] ac.setCheckable(True) ac.setChecked(True) ac.toggled.connect(self.auto_reload_toggled) self.auto_reload_toggled(ac.isChecked()) self.bar.addAction(ac) ac = actions['sync-preview-to-editor'] ac.setCheckable(True) ac.setChecked(True) 
ac.toggled.connect(self.sync_toggled) self.sync_toggled(ac.isChecked()) self.bar.addAction(ac) self.bar.addSeparator() ac = actions['split-in-preview'] ac.setCheckable(True) ac.setChecked(False) ac.toggled.connect(self.split_toggled) self.split_toggled(ac.isChecked()) self.bar.addAction(ac) ac = actions['reload-preview'] ac.triggered.connect(self.refresh) self.bar.addAction(ac) actions['preview-dock'].toggled.connect(self.visibility_changed) self.current_name = None self.last_sync_request = None self.refresh_timer = QTimer(self) self.refresh_timer.timeout.connect(self.refresh) parse_worker.start() self.current_sync_request = None self.search = HistoryLineEdit2(self) self.search.initialize('tweak_book_preview_search') self.search.setPlaceholderText(_('Search in preview')) self.search.returnPressed.connect(partial(self.find, 'next')) self.bar.addSeparator() self.bar.addWidget(self.search) for d in ('next', 'prev'): ac = actions['find-%s-preview' % d] ac.triggered.connect(partial(self.find, d)) self.bar.addAction(ac) def find(self, direction): text = unicode(self.search.text()) self.view.findText(text, QWebPage.FindWrapsAroundDocument | ( QWebPage.FindBackward if direction == 'prev' else QWebPage.FindFlags(0))) def request_sync(self, tagname, href, lnum): if self.current_name: c = current_container() if tagname == 'a' and href: if href and href.startswith('#'): name = self.current_name else: name = c.href_to_name(href, self.current_name) if href else None if name == self.current_name: return self.view.page().go_to_anchor(urlparse(href).fragment, lnum) if name and c.exists(name) and c.mime_map[name] in OEB_DOCS: return self.link_clicked.emit(name, urlparse(href).fragment or TOP) self.sync_requested.emit(self.current_name, lnum) def request_split(self, loc, totals): if self.current_name: self.split_requested.emit(self.current_name, loc, totals) def sync_to_editor(self, name, sourceline_address): self.current_sync_request = (name, sourceline_address) QTimer.singleShot(100, self._sync_to_editor) def _sync_to_editor(self): if not actions['sync-preview-to-editor'].isChecked(): return try: if self.refresh_timer.isActive() or self.current_sync_request[0] != self.current_name: return QTimer.singleShot(100, self._sync_to_editor) except TypeError: return # Happens if current_sync_request is None sourceline_address = self.current_sync_request[1] self.current_sync_request = None self.view.page().go_to_sourceline_address(sourceline_address) def report_worker_launch_error(self): if parse_worker.launch_error is not None: tb, parse_worker.launch_error = parse_worker.launch_error, None error_dialog(self, _('Failed to launch worker'), _( 'Failed to launch the worker process used for rendering the preview'), det_msg=tb, show=True) def show(self, name): if name != self.current_name: self.refresh_timer.stop() self.current_name = name self.report_worker_launch_error() parse_worker.add_request(name) self.view.setUrl(QUrl.fromLocalFile(current_container().name_to_abspath(name))) return True def refresh(self): if self.current_name: self.refresh_timer.stop() # This will check if the current html has changed in its editor, # and re-parse it if so self.report_worker_launch_error() parse_worker.add_request(self.current_name) # Tell webkit to reload all html and associated resources current_url = QUrl.fromLocalFile(current_container().name_to_abspath(self.current_name)) self.refresh_starting.emit() if current_url != self.view.url(): # The container was changed self.view.setUrl(current_url) else: self.view.refresh() 
self.refreshed.emit() def clear(self): self.view.clear() self.current_name = None @property def current_root(self): return self.view.page().current_root @property def is_visible(self): return actions['preview-dock'].isChecked() @property def live_css_is_visible(self): try: return actions['live-css-dock'].isChecked() except KeyError: return False def start_refresh_timer(self): if self.live_css_is_visible or (self.is_visible and actions['auto-reload-preview'].isChecked()): self.refresh_timer.start(tprefs['preview_refresh_time'] * 1000) def stop_refresh_timer(self): self.refresh_timer.stop() def auto_reload_toggled(self, checked): if self.live_css_is_visible and not actions['auto-reload-preview'].isChecked(): actions['auto-reload-preview'].setChecked(True) error_dialog(self, _('Cannot disable'), _( 'Auto reloading of the preview panel cannot be disabled while the' ' Live CSS panel is open.'), show=True) actions['auto-reload-preview'].setToolTip(_( 'Auto reload preview when text changes in editor') if not checked else _( 'Disable auto reload of preview')) def sync_toggled(self, checked): actions['sync-preview-to-editor'].setToolTip(_( 'Disable syncing of preview position to editor position') if checked else _( 'Enable syncing of preview position to editor position')) def visibility_changed(self, is_visible): if is_visible: self.refresh() def split_toggled(self, checked): actions['split-in-preview'].setToolTip(textwrap.fill(_( 'Abort file split') if checked else _( 'Split this file at a specified location.\n\nAfter clicking this button, click' ' inside the preview panel above at the location you want the file to be split.'))) if checked: self.split_start_requested.emit() else: self.view.page().split_mode(False) def do_start_split(self): self.view.page().split_mode(True) def stop_split(self): actions['split-in-preview'].setChecked(False) def load_finished(self, ok): if actions['split-in-preview'].isChecked(): if ok: self.do_start_split() else: self.stop_split() def apply_settings(self): s = self.view.page().settings() s.setFontSize(s.DefaultFontSize, tprefs['preview_base_font_size']) s.setFontSize(s.DefaultFixedFontSize, tprefs['preview_mono_font_size']) s.setFontSize(s.MinimumLogicalFontSize, tprefs['preview_minimum_font_size']) s.setFontSize(s.MinimumFontSize, tprefs['preview_minimum_font_size']) sf, ssf, mf = tprefs['preview_serif_family'], tprefs['preview_sans_family'], tprefs['preview_mono_family'] s.setFontFamily(s.StandardFont, {'serif':sf, 'sans':ssf, 'mono':mf, None:sf}[tprefs['preview_standard_font_family']]) s.setFontFamily(s.SerifFont, sf) s.setFontFamily(s.SansSerifFont, ssf) s.setFontFamily(s.FixedFont, mf)
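# --- Standalone sketch (editor's addition): the bisect recipe that
# go_to_line() depends on, mapping an arbitrary editor line number to the
# nearest preceding line that exists in the rendered document. Note that
# find_le() above returns a[bisect_right(a, x)]; the textbook recipe for
# its docstring ("rightmost value less than or equal to x") subtracts one
# from the insertion point, as below.
from bisect import bisect_right


def rightmost_le(sorted_vals, x):
    # Rightmost value in sorted_vals that is <= x; raises ValueError when
    # every value is greater than x.
    i = bisect_right(sorted_vals, x)
    if i == 0:
        raise ValueError('no value <= %r' % (x,))
    return sorted_vals[i - 1]


# With a plausible set of rendered line numbers:
# >>> rightmost_le([1, 8, 20, 57], 30)
# 20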
./CrossVul/dataset_final_sorted/CWE-264/py/bad_4833_0
crossvul-python_data_bad_3693_2
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy
import datetime

from keystone.common import sql
from keystone import exception
from keystone import token


class TokenModel(sql.ModelBase, sql.DictBase):
    __tablename__ = 'token'
    id = sql.Column(sql.String(64), primary_key=True)
    expires = sql.Column(sql.DateTime(), default=None)
    extra = sql.Column(sql.JsonBlob())

    @classmethod
    def from_dict(cls, token_dict):
        # shove any non-indexed properties into extra
        extra = copy.deepcopy(token_dict)
        data = {}
        for k in ('id', 'expires'):
            data[k] = extra.pop(k, None)
        data['extra'] = extra
        return cls(**data)

    def to_dict(self):
        out = copy.deepcopy(self.extra)
        out['id'] = self.id
        out['expires'] = self.expires
        return out


class Token(sql.Base, token.Driver):
    # Public interface
    def get_token(self, token_id):
        session = self.get_session()
        token_ref = session.query(TokenModel).filter_by(id=token_id).first()
        now = datetime.datetime.utcnow()
        if token_ref and (not token_ref.expires or now < token_ref.expires):
            return token_ref.to_dict()
        else:
            raise exception.TokenNotFound(token_id=token_id)

    def create_token(self, token_id, data):
        data_copy = copy.deepcopy(data)
        if 'expires' not in data_copy:
            data_copy['expires'] = self._get_default_expire_time()

        token_ref = TokenModel.from_dict(data_copy)
        token_ref.id = token_id

        session = self.get_session()
        with session.begin():
            session.add(token_ref)
            session.flush()
        return token_ref.to_dict()

    def delete_token(self, token_id):
        session = self.get_session()
        token_ref = session.query(TokenModel)\
                           .filter_by(id=token_id)\
                           .first()
        if not token_ref:
            raise exception.TokenNotFound(token_id=token_id)

        with session.begin():
            session.delete(token_ref)
            session.flush()
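# --- Illustrative sketch (editor's addition; round_trip() is hypothetical):
# how TokenModel.from_dict()/to_dict() split a token dict. The indexed keys
# ('id', 'expires') become real columns; everything else is stashed in the
# JSON-blob 'extra' column and merged back on the way out.
import copy


def round_trip(token_dict):
    # Mirrors from_dict(): pop the indexed keys, keep the rest as extra.
    extra = copy.deepcopy(token_dict)
    data = dict((k, extra.pop(k, None)) for k in ('id', 'expires'))
    # Mirrors to_dict(): start from extra, then restore the indexed keys.
    out = copy.deepcopy(extra)
    out['id'] = data['id']
    out['expires'] = data['expires']
    return out


# Nothing is lost in the split:
# >>> t = {'id': 'abc', 'expires': None, 'user': {'id': 'u1'}}
# >>> round_trip(t) == t
# True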
./CrossVul/dataset_final_sorted/CWE-264/py/bad_3693_2
crossvul-python_data_bad_3695_4
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Main entry point into the Token service."""

import datetime

from keystone import config
from keystone import exception
from keystone.common import manager


CONF = config.CONF
config.register_int('expiration', group='token', default=86400)


class Manager(manager.Manager):
    """Default pivot point for the Token backend.

    See :mod:`keystone.common.manager.Manager` for more details on how this
    dynamically calls the backend.

    """

    def __init__(self):
        super(Manager, self).__init__(CONF.token.driver)


class Driver(object):
    """Interface description for a Token driver."""

    def get_token(self, token_id):
        """Get a token by id.

        :param token_id: identity of the token
        :type token_id: string
        :returns: token_ref
        :raises: keystone.exception.TokenNotFound

        """
        raise exception.NotImplemented()

    def create_token(self, token_id, data):
        """Create a token by id and data.

        :param token_id: identity of the token
        :type token_id: string
        :param data: dictionary with additional reference information

        ::

            {
                expires=''
                id=token_id,
                user=user_ref,
                tenant=tenant_ref,
                metadata=metadata_ref
            }

        :type data: dict
        :returns: token_ref or None.

        """
        raise exception.NotImplemented()

    def delete_token(self, token_id):
        """Deletes a token by id.

        :param token_id: identity of the token
        :type token_id: string
        :returns: None.
        :raises: keystone.exception.TokenNotFound

        """
        raise exception.NotImplemented()

    def _get_default_expire_time(self):
        """Determine when a token should expire based on the config.

        :returns: a naive utc datetime.datetime object

        """
        expire_delta = datetime.timedelta(seconds=CONF.token.expiration)
        return datetime.datetime.utcnow() + expire_delta
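# --- Minimal sketch (editor's addition) of the expiry computation that
# _get_default_expire_time() performs: a naive UTC "now" plus the
# configured [token] expiration seconds. Real callers read
# CONF.token.expiration rather than passing a literal.
import datetime


def default_expiry(expiration_seconds=86400):
    # 86400 matches the default registered by register_int() above.
    return (datetime.datetime.utcnow() +
            datetime.timedelta(seconds=expiration_seconds))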
./CrossVul/dataset_final_sorted/CWE-264/py/bad_3695_4
crossvul-python_data_bad_3771_3
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010-2011 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Common utilities used in testing"""

import errno
import functools
import os
import random
import socket
import StringIO
import subprocess
import unittest

import nose.plugins.skip

from glance.common import config
from glance.common import utils
from glance.common import wsgi
from glance import context
from glance.openstack.common import cfg

CONF = cfg.CONF


def get_isolated_test_env():
    """
    Returns a tuple of (test_id, test_dir) that is unique
    for an isolated test environment. Also ensure the test_dir
    is created.
    """
    test_id = random.randint(0, 100000)
    test_tmp_dir = os.getenv('GLANCE_TEST_TMP_DIR', '/tmp')
    test_dir = os.path.join(test_tmp_dir, "test.%d" % test_id)
    utils.safe_mkdirs(test_dir)
    return test_id, test_dir


class BaseTestCase(unittest.TestCase):

    def setUp(self):
        super(BaseTestCase, self).setUp()
        #NOTE(bcwaldon): parse_args has to be called to register certain
        # command-line options - specifically we need config_dir for
        # the following policy tests
        config.parse_args(args=[])

    def tearDown(self):
        super(BaseTestCase, self).tearDown()
        CONF.reset()

    def config(self, **kw):
        """
        Override some configuration values.

        The keyword arguments are the names of configuration options to
        override and their values.

        If a group argument is supplied, the overrides are applied to
        the specified configuration option group.

        All overrides are automatically cleared at the end of the current
        test by the tearDown() method.
        """
        group = kw.pop('group', None)
        for k, v in kw.iteritems():
            CONF.set_override(k, v, group)


class skip_test(object):
    """Decorator that skips a test."""
    def __init__(self, msg):
        self.message = msg

    def __call__(self, func):
        def _skipper(*args, **kw):
            """Wrapped skipper function."""
            raise nose.SkipTest(self.message)
        _skipper.__name__ = func.__name__
        _skipper.__doc__ = func.__doc__
        return _skipper


class skip_if(object):
    """Decorator that skips a test if condition is true."""
    def __init__(self, condition, msg):
        self.condition = condition
        self.message = msg

    def __call__(self, func):
        def _skipper(*args, **kw):
            """Wrapped skipper function."""
            if self.condition:
                raise nose.SkipTest(self.message)
            func(*args, **kw)
        _skipper.__name__ = func.__name__
        _skipper.__doc__ = func.__doc__
        return _skipper


class skip_unless(object):
    """Decorator that skips a test if condition is not true."""
    def __init__(self, condition, msg):
        self.condition = condition
        self.message = msg

    def __call__(self, func):
        def _skipper(*args, **kw):
            """Wrapped skipper function."""
            if not self.condition:
                raise nose.SkipTest(self.message)
            func(*args, **kw)
        _skipper.__name__ = func.__name__
        _skipper.__doc__ = func.__doc__
        return _skipper


class requires(object):
    """Decorator that initiates additional test setup/teardown."""
    def __init__(self, setup=None, teardown=None):
        self.setup = setup
        self.teardown = teardown

    def __call__(self, func):
        def _runner(*args, **kw):
            if self.setup:
                self.setup(args[0])
            func(*args, **kw)
            if self.teardown:
                self.teardown(args[0])
        _runner.__name__ = func.__name__
        _runner.__doc__ = func.__doc__
        return _runner


class depends_on_exe(object):
    """Decorator to skip test if an executable is unavailable"""
    def __init__(self, exe):
        self.exe = exe

    def __call__(self, func):
        def _runner(*args, **kw):
            cmd = 'which %s' % self.exe
            exitcode, out, err = execute(cmd, raise_error=False)
            if exitcode != 0:
                args[0].disabled_message = 'test requires exe: %s' % self.exe
                args[0].disabled = True
            func(*args, **kw)
        _runner.__name__ = func.__name__
        _runner.__doc__ = func.__doc__
        return _runner


def skip_if_disabled(func):
    """Decorator that skips a test if test case is disabled."""
    @functools.wraps(func)
    def wrapped(*a, **kwargs):
        func.__test__ = False
        test_obj = a[0]
        message = getattr(test_obj, 'disabled_message', 'Test disabled')
        if getattr(test_obj, 'disabled', False):
            raise nose.SkipTest(message)
        func(*a, **kwargs)
    return wrapped


def execute(cmd,
            raise_error=True,
            no_venv=False,
            exec_env=None,
            expect_exit=True,
            expected_exitcode=0,
            context=None):
    """
    Executes a command in a subprocess. Returns a tuple
    of (exitcode, out, err), where out is the string output
    from stdout and err is the string output from stderr when
    executing the command.

    :param cmd: Command string to execute
    :param raise_error: If returncode is not 0 (success), then
                        raise a RuntimeError? (Default: True)
    :param no_venv: Disable the virtual environment
    :param exec_env: Optional dictionary of additional environment
                     variables; values may be callables, which will
                     be passed the current value of the named
                     environment variable
    :param expect_exit: Optional flag true iff timely exit is expected
    :param expected_exitcode: expected exitcode from the launcher
    :param context: additional context for error message
    """

    env = os.environ.copy()
    if exec_env is not None:
        for env_name, env_val in exec_env.items():
            if callable(env_val):
                env[env_name] = env_val(env.get(env_name))
            else:
                env[env_name] = env_val

    # If we're asked to omit the virtualenv, and if one is set up,
    # restore the various environment variables
    if no_venv and 'VIRTUAL_ENV' in env:
        # Clip off the first element of PATH
        env['PATH'] = env['PATH'].split(os.pathsep, 1)[-1]
        del env['VIRTUAL_ENV']

    # Make sure that we use the programs in the
    # current source directory's bin/ directory.
    path_ext = [os.path.join(os.getcwd(), 'bin')]

    # Also jack in the path cmd comes from, if it's absolute
    executable = cmd.split()[0]
    if os.path.isabs(executable):
        path_ext.append(os.path.dirname(executable))

    env['PATH'] = ':'.join(path_ext) + ':' + env['PATH']
    process = subprocess.Popen(cmd,
                               shell=True,
                               stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               env=env)
    if expect_exit:
        result = process.communicate()
        (out, err) = result
        exitcode = process.returncode
    else:
        out = ''
        err = ''
        exitcode = 0

    if exitcode != expected_exitcode and raise_error:
        msg = "Command %(cmd)s did not succeed. Returned an exit "\
              "code of %(exitcode)d."\
              "\n\nSTDOUT: %(out)s"\
              "\n\nSTDERR: %(err)s" % locals()
        if context:
            msg += "\n\nCONTEXT: %s" % context
        raise RuntimeError(msg)
    return exitcode, out, err


def find_executable(cmdname):
    """
    Searches the path for a given cmdname.  Returns an absolute
    filename if an executable with the given name exists in the path,
    or None if one does not.

    :param cmdname: The bare name of the executable to search for
    """

    # Keep an eye out for the possibility of an absolute pathname
    if os.path.isabs(cmdname):
        return cmdname

    # Get a list of the directories to search
    path = ([os.path.join(os.getcwd(), 'bin')] +
            os.environ['PATH'].split(os.pathsep))

    # Search through each in turn
    for elem in path:
        full_path = os.path.join(elem, cmdname)
        if os.access(full_path, os.X_OK):
            return full_path

    # No dice...
    return None


def get_unused_port():
    """
    Returns an unused port on localhost.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind(('localhost', 0))
    addr, port = s.getsockname()
    s.close()
    return port


def xattr_writes_supported(path):
    """
    Returns True if we can write a file to the supplied
    path and subsequently write a xattr to that file.
    """
    try:
        import xattr
    except ImportError:
        return False

    def set_xattr(path, key, value):
        xattr.setxattr(path, "user.%s" % key, str(value))

    # We do a quick attempt to write a user xattr to a temporary file
    # to check that the filesystem is even enabled to support xattrs
    fake_filepath = os.path.join(path, 'testing-checkme')
    result = True
    with open(fake_filepath, 'wb') as fake_file:
        fake_file.write("XXX")
        fake_file.flush()
    try:
        set_xattr(fake_filepath, 'hits', '1')
    except IOError, e:
        if e.errno == errno.EOPNOTSUPP:
            result = False
    else:
        # Cleanup after ourselves...
        if os.path.exists(fake_filepath):
            os.unlink(fake_filepath)

    return result


def minimal_headers(name, public=True):
    headers = {
        'Content-Type': 'application/octet-stream',
        'X-Image-Meta-Name': name,
        'X-Image-Meta-disk_format': 'raw',
        'X-Image-Meta-container_format': 'ovf',
    }
    if public:
        headers['X-Image-Meta-Is-Public'] = 'True'

    return headers


def minimal_add_command(port, name, suffix='', public=True):
    visibility = 'is_public=True' if public else ''
    return ("bin/glance --port=%d add %s"
            " disk_format=raw container_format=ovf"
            " name=%s %s" % (port, visibility, name, suffix))


class FakeAuthMiddleware(wsgi.Middleware):

    def __init__(self, app, is_admin=False):
        super(FakeAuthMiddleware, self).__init__(app)
        self.is_admin = is_admin

    def process_request(self, req):
        auth_tok = req.headers.get('X-Auth-Token')
        user = None
        tenant = None
        roles = []
        if auth_tok:
            user, tenant, role = auth_tok.split(':')
            if tenant.lower() == 'none':
                tenant = None
            roles = [role]
            req.headers['X-User-Id'] = user
            req.headers['X-Tenant-Id'] = tenant
            req.headers['X-Roles'] = role
            req.headers['X-Identity-Status'] = 'Confirmed'
        kwargs = {
            'user': user,
            'tenant': tenant,
            'roles': roles,
            'is_admin': self.is_admin,
        }
        req.context = context.RequestContext(**kwargs)


class FakeHTTPResponse(object):
    def __init__(self, status=200, headers=None, data=None, *args, **kwargs):
        data = data or 'I am a teapot, short and stout\n'
        self.data = StringIO.StringIO(data)
        self.read = self.data.read
        self.status = status
        self.headers = headers or {'content-length': len(data)}

    def getheader(self, name, default=None):
        return self.headers.get(name.lower(), default)

    def getheaders(self):
        return self.headers or {}

    def read(self, amt):
        # Return the bytes read so callers that reach this method (rather
        # than the bound self.read assigned in __init__) actually get data.
        return self.data.read(amt)
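# --- Hedged usage sketch (editor's addition; fake_auth_headers() is a
# hypothetical test helper): FakeAuthMiddleware above parses X-Auth-Token
# as 'user:tenant:role', with the literal tenant 'none' mapping to an
# unscoped request (tenant becomes None).
def fake_auth_headers(user='user1', tenant='tenant1', role='member'):
    # Header dict a test would attach to a webob request before handing it
    # to the middleware.
    return {'X-Auth-Token': '%s:%s:%s' % (user, tenant, role)}


# An unscoped token -- process_request() will set X-Tenant-Id to None:
# >>> fake_auth_headers(tenant='none')
# {'X-Auth-Token': 'user1:none:member'}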
./CrossVul/dataset_final_sorted/CWE-264/py/bad_3771_3
crossvul-python_data_bad_3693_1
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy
import datetime

from keystone.common import kvs
from keystone import exception
from keystone import token


class Token(kvs.Base, token.Driver):
    # Public interface
    def get_token(self, token_id):
        token = self.db.get('token-%s' % token_id)
        if (token and (token['expires'] is None
                       or token['expires'] > datetime.datetime.utcnow())):
            return token
        else:
            raise exception.TokenNotFound(token_id=token_id)

    def create_token(self, token_id, data):
        data_copy = copy.deepcopy(data)
        if 'expires' not in data:
            data_copy['expires'] = self._get_default_expire_time()
        self.db.set('token-%s' % token_id, data_copy)
        return copy.deepcopy(data_copy)

    def delete_token(self, token_id):
        try:
            return self.db.delete('token-%s' % token_id)
        except KeyError:
            raise exception.TokenNotFound(token_id=token_id)
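# --- Standalone sketch (editor's addition) of the validity test used by
# get_token() above: a token is live when 'expires' is None (it never
# expires) or when the expiry lies in the future.
import datetime


def token_is_valid(token_ref, now=None):
    now = now or datetime.datetime.utcnow()
    expires = token_ref.get('expires')
    return expires is None or expires > now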
./CrossVul/dataset_final_sorted/CWE-264/py/bad_3693_1
crossvul-python_data_bad_2042_1
from collections import OrderedDict import copy import operator from functools import partial, reduce, update_wrapper import warnings from django import forms from django.conf import settings from django.contrib import messages from django.contrib.admin import widgets, helpers from django.contrib.admin import validation from django.contrib.admin.checks import (BaseModelAdminChecks, ModelAdminChecks, InlineModelAdminChecks) from django.contrib.admin.utils import (quote, unquote, flatten_fieldsets, get_deleted_objects, model_format_dict, NestedObjects, lookup_needs_distinct) from django.contrib.admin.templatetags.admin_static import static from django.contrib.admin.templatetags.admin_urls import add_preserved_filters from django.contrib.auth import get_permission_codename from django.core import checks from django.core.exceptions import (PermissionDenied, ValidationError, FieldError, ImproperlyConfigured) from django.core.paginator import Paginator from django.core.urlresolvers import reverse from django.db import models, transaction, router from django.db.models.constants import LOOKUP_SEP from django.db.models.related import RelatedObject from django.db.models.fields import BLANK_CHOICE_DASH, FieldDoesNotExist from django.db.models.sql.constants import QUERY_TERMS from django.forms.formsets import all_valid, DELETION_FIELD_NAME from django.forms.models import (modelform_factory, modelformset_factory, inlineformset_factory, BaseInlineFormSet, modelform_defines_fields) from django.http import Http404, HttpResponseRedirect from django.http.response import HttpResponseBase from django.shortcuts import get_object_or_404 from django.template.response import SimpleTemplateResponse, TemplateResponse from django.utils import six from django.utils.decorators import method_decorator from django.utils.deprecation import (RenameMethodsBase, RemovedInDjango18Warning, RemovedInDjango19Warning) from django.utils.encoding import force_text, python_2_unicode_compatible from django.utils.html import escape, escapejs from django.utils.http import urlencode from django.utils.text import capfirst, get_text_list from django.utils.translation import ugettext as _ from django.utils.translation import ungettext from django.utils.safestring import mark_safe from django.views.decorators.csrf import csrf_protect IS_POPUP_VAR = '_popup' TO_FIELD_VAR = '_to_field' HORIZONTAL, VERTICAL = 1, 2 def get_content_type_for_model(obj): # Since this module gets imported in the application's root package, # it cannot import models from other applications at the module level. from django.contrib.contenttypes.models import ContentType return ContentType.objects.get_for_model(obj, for_concrete_model=False) def get_ul_class(radio_style): return 'radiolist' if radio_style == VERTICAL else 'radiolist inline' class IncorrectLookupParameters(Exception): pass # Defaults for formfield_overrides. ModelAdmin subclasses can change this # by adding to ModelAdmin.formfield_overrides. 
FORMFIELD_FOR_DBFIELD_DEFAULTS = { models.DateTimeField: { 'form_class': forms.SplitDateTimeField, 'widget': widgets.AdminSplitDateTime }, models.DateField: {'widget': widgets.AdminDateWidget}, models.TimeField: {'widget': widgets.AdminTimeWidget}, models.TextField: {'widget': widgets.AdminTextareaWidget}, models.URLField: {'widget': widgets.AdminURLFieldWidget}, models.IntegerField: {'widget': widgets.AdminIntegerFieldWidget}, models.BigIntegerField: {'widget': widgets.AdminBigIntegerFieldWidget}, models.CharField: {'widget': widgets.AdminTextInputWidget}, models.ImageField: {'widget': widgets.AdminFileWidget}, models.FileField: {'widget': widgets.AdminFileWidget}, models.EmailField: {'widget': widgets.AdminEmailInputWidget}, } csrf_protect_m = method_decorator(csrf_protect) class RenameBaseModelAdminMethods(forms.MediaDefiningClass, RenameMethodsBase): renamed_methods = ( ('queryset', 'get_queryset', RemovedInDjango18Warning), ) class BaseModelAdmin(six.with_metaclass(RenameBaseModelAdminMethods)): """Functionality common to both ModelAdmin and InlineAdmin.""" raw_id_fields = () fields = None exclude = None fieldsets = None form = forms.ModelForm filter_vertical = () filter_horizontal = () radio_fields = {} prepopulated_fields = {} formfield_overrides = {} readonly_fields = () ordering = None view_on_site = True # Validation of ModelAdmin definitions # Old, deprecated style: validator_class = None default_validator_class = validation.BaseValidator # New style: checks_class = BaseModelAdminChecks @classmethod def validate(cls, model): warnings.warn( 'ModelAdmin.validate() is deprecated. Use "check()" instead.', RemovedInDjango19Warning) if cls.validator_class: validator = cls.validator_class() else: validator = cls.default_validator_class() validator.validate(cls, model) @classmethod def check(cls, model, **kwargs): if cls.validator_class: warnings.warn( 'ModelAdmin.validator_class is deprecated. ' 'ModeAdmin validators must be converted to use ' 'the system check framework.', RemovedInDjango19Warning) validator = cls.validator_class() try: validator.validate(cls, model) except ImproperlyConfigured as e: return [checks.Error(e.args[0], hint=None, obj=cls)] else: return [] else: return cls.checks_class().check(cls, model, **kwargs) def __init__(self): overrides = FORMFIELD_FOR_DBFIELD_DEFAULTS.copy() overrides.update(self.formfield_overrides) self.formfield_overrides = overrides def formfield_for_dbfield(self, db_field, **kwargs): """ Hook for specifying the form Field instance for a given database Field instance. If kwargs are given, they're passed to the form Field's constructor. """ request = kwargs.pop("request", None) # If the field specifies choices, we don't need to look for special # admin widgets - we just need to use a select widget of some kind. if db_field.choices: return self.formfield_for_choice_field(db_field, request, **kwargs) # ForeignKey or ManyToManyFields if isinstance(db_field, (models.ForeignKey, models.ManyToManyField)): # Combine the field kwargs with any options for formfield_overrides. # Make sure the passed in **kwargs override anything in # formfield_overrides because **kwargs is more specific, and should # always win. if db_field.__class__ in self.formfield_overrides: kwargs = dict(self.formfield_overrides[db_field.__class__], **kwargs) # Get the correct formfield. 
if isinstance(db_field, models.ForeignKey): formfield = self.formfield_for_foreignkey(db_field, request, **kwargs) elif isinstance(db_field, models.ManyToManyField): formfield = self.formfield_for_manytomany(db_field, request, **kwargs) # For non-raw_id fields, wrap the widget with a wrapper that adds # extra HTML -- the "add other" interface -- to the end of the # rendered output. formfield can be None if it came from a # OneToOneField with parent_link=True or a M2M intermediary. if formfield and db_field.name not in self.raw_id_fields: related_modeladmin = self.admin_site._registry.get(db_field.rel.to) can_add_related = bool(related_modeladmin and related_modeladmin.has_add_permission(request)) formfield.widget = widgets.RelatedFieldWidgetWrapper( formfield.widget, db_field.rel, self.admin_site, can_add_related=can_add_related) return formfield # If we've got overrides for the formfield defined, use 'em. **kwargs # passed to formfield_for_dbfield override the defaults. for klass in db_field.__class__.mro(): if klass in self.formfield_overrides: kwargs = dict(copy.deepcopy(self.formfield_overrides[klass]), **kwargs) return db_field.formfield(**kwargs) # For any other type of field, just call its formfield() method. return db_field.formfield(**kwargs) def formfield_for_choice_field(self, db_field, request=None, **kwargs): """ Get a form Field for a database Field that has declared choices. """ # If the field is named as a radio_field, use a RadioSelect if db_field.name in self.radio_fields: # Avoid stomping on custom widget/choices arguments. if 'widget' not in kwargs: kwargs['widget'] = widgets.AdminRadioSelect(attrs={ 'class': get_ul_class(self.radio_fields[db_field.name]), }) if 'choices' not in kwargs: kwargs['choices'] = db_field.get_choices( include_blank=db_field.blank, blank_choice=[('', _('None'))] ) return db_field.formfield(**kwargs) def get_field_queryset(self, db, db_field, request): """ If the ModelAdmin specifies ordering, the queryset should respect that ordering. Otherwise don't specify the queryset, let the field decide (returns None in that case). """ related_admin = self.admin_site._registry.get(db_field.rel.to, None) if related_admin is not None: ordering = related_admin.get_ordering(request) if ordering is not None and ordering != (): return db_field.rel.to._default_manager.using(db).order_by(*ordering) return None def formfield_for_foreignkey(self, db_field, request=None, **kwargs): """ Get a form Field for a ForeignKey. """ db = kwargs.get('using') if db_field.name in self.raw_id_fields: kwargs['widget'] = widgets.ForeignKeyRawIdWidget(db_field.rel, self.admin_site, using=db) elif db_field.name in self.radio_fields: kwargs['widget'] = widgets.AdminRadioSelect(attrs={ 'class': get_ul_class(self.radio_fields[db_field.name]), }) kwargs['empty_label'] = _('None') if db_field.blank else None if 'queryset' not in kwargs: queryset = self.get_field_queryset(db, db_field, request) if queryset is not None: kwargs['queryset'] = queryset return db_field.formfield(**kwargs) def formfield_for_manytomany(self, db_field, request=None, **kwargs): """ Get a form Field for a ManyToManyField. """ # If it uses an intermediary model that isn't auto created, don't show # a field in admin. 
if not db_field.rel.through._meta.auto_created: return None db = kwargs.get('using') if db_field.name in self.raw_id_fields: kwargs['widget'] = widgets.ManyToManyRawIdWidget(db_field.rel, self.admin_site, using=db) kwargs['help_text'] = '' elif db_field.name in (list(self.filter_vertical) + list(self.filter_horizontal)): kwargs['widget'] = widgets.FilteredSelectMultiple(db_field.verbose_name, (db_field.name in self.filter_vertical)) if 'queryset' not in kwargs: queryset = self.get_field_queryset(db, db_field, request) if queryset is not None: kwargs['queryset'] = queryset return db_field.formfield(**kwargs) def get_view_on_site_url(self, obj=None): if obj is None or not self.view_on_site: return None if callable(self.view_on_site): return self.view_on_site(obj) elif self.view_on_site and hasattr(obj, 'get_absolute_url'): # use the ContentType lookup if view_on_site is True return reverse('admin:view_on_site', kwargs={ 'content_type_id': get_content_type_for_model(obj).pk, 'object_id': obj.pk }) @property def declared_fieldsets(self): warnings.warn( "ModelAdmin.declared_fieldsets is deprecated and " "will be removed in Django 1.9.", RemovedInDjango19Warning, stacklevel=2 ) if self.fieldsets: return self.fieldsets elif self.fields: return [(None, {'fields': self.fields})] return None def get_fields(self, request, obj=None): """ Hook for specifying fields. """ return self.fields def get_fieldsets(self, request, obj=None): """ Hook for specifying fieldsets. """ # We access the property and check if it triggers a warning. # If it does, then it's ours and we can safely ignore it, but if # it doesn't then it has been overridden so we must warn about the # deprecation. with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") declared_fieldsets = self.declared_fieldsets if len(w) != 1 or not issubclass(w[0].category, RemovedInDjango19Warning): warnings.warn( "ModelAdmin.declared_fieldsets is deprecated and " "will be removed in Django 1.9.", RemovedInDjango19Warning ) if declared_fieldsets: return declared_fieldsets if self.fieldsets: return self.fieldsets return [(None, {'fields': self.get_fields(request, obj)})] def get_ordering(self, request): """ Hook for specifying field ordering. """ return self.ordering or () # otherwise we might try to *None, which is bad ;) def get_readonly_fields(self, request, obj=None): """ Hook for specifying custom readonly fields. """ return self.readonly_fields def get_prepopulated_fields(self, request, obj=None): """ Hook for specifying custom prepopulated fields. """ return self.prepopulated_fields def get_queryset(self, request): """ Returns a QuerySet of all model instances that can be edited by the admin site. This is used by changelist_view. """ qs = self.model._default_manager.get_queryset() # TODO: this should be handled by some parameter to the ChangeList. ordering = self.get_ordering(request) if ordering: qs = qs.order_by(*ordering) return qs def lookup_allowed(self, lookup, value): from django.contrib.admin.filters import SimpleListFilter model = self.model # Check FKey lookups that are allowed, so that popups produced by # ForeignKeyRawIdWidget, on the basis of ForeignKey.limit_choices_to, # are allowed to work. for l in model._meta.related_fkey_lookups: # As ``limit_choices_to`` can be a callable, invoke it here. 
if callable(l): l = l() for k, v in widgets.url_params_from_lookup_dict(l).items(): if k == lookup and v == value: return True parts = lookup.split(LOOKUP_SEP) # Last term in lookup is a query term (__exact, __startswith etc) # This term can be ignored. if len(parts) > 1 and parts[-1] in QUERY_TERMS: parts.pop() # Special case -- foo__id__exact and foo__id queries are implied # if foo has been specifically included in the lookup list; so # drop __id if it is the last part. However, first we need to find # the pk attribute name. rel_name = None for part in parts[:-1]: try: field, _, _, _ = model._meta.get_field_by_name(part) except FieldDoesNotExist: # Lookups on non-existent fields are ok, since they're ignored # later. return True if hasattr(field, 'rel'): if field.rel is None: # This property or relation doesn't exist, but it's allowed # since it's ignored in ChangeList.get_filters(). return True model = field.rel.to rel_name = field.rel.get_related_field().name elif isinstance(field, RelatedObject): model = field.model rel_name = model._meta.pk.name else: rel_name = None if rel_name and len(parts) > 1 and parts[-1] == rel_name: parts.pop() if len(parts) == 1: return True clean_lookup = LOOKUP_SEP.join(parts) valid_lookups = [self.date_hierarchy] for filter_item in self.list_filter: if isinstance(filter_item, type) and issubclass(filter_item, SimpleListFilter): valid_lookups.append(filter_item.parameter_name) elif isinstance(filter_item, (list, tuple)): valid_lookups.append(filter_item[0]) else: valid_lookups.append(filter_item) return clean_lookup in valid_lookups def has_add_permission(self, request): """ Returns True if the given request has permission to add an object. Can be overridden by the user in subclasses. """ opts = self.opts codename = get_permission_codename('add', opts) return request.user.has_perm("%s.%s" % (opts.app_label, codename)) def has_change_permission(self, request, obj=None): """ Returns True if the given request has permission to change the given Django model instance, the default implementation doesn't examine the `obj` parameter. Can be overridden by the user in subclasses. In such case it should return True if the given request has permission to change the `obj` model instance. If `obj` is None, this should return True if the given request has permission to change *any* object of the given type. """ opts = self.opts codename = get_permission_codename('change', opts) return request.user.has_perm("%s.%s" % (opts.app_label, codename)) def has_delete_permission(self, request, obj=None): """ Returns True if the given request has permission to change the given Django model instance, the default implementation doesn't examine the `obj` parameter. Can be overridden by the user in subclasses. In such case it should return True if the given request has permission to delete the `obj` model instance. If `obj` is None, this should return True if the given request has permission to delete *any* object of the given type. """ opts = self.opts codename = get_permission_codename('delete', opts) return request.user.has_perm("%s.%s" % (opts.app_label, codename)) @python_2_unicode_compatible class ModelAdmin(BaseModelAdmin): "Encapsulates all admin options and functionality for a given model." 
list_display = ('__str__',) list_display_links = () list_filter = () list_select_related = False list_per_page = 100 list_max_show_all = 200 list_editable = () search_fields = () date_hierarchy = None save_as = False save_on_top = False paginator = Paginator preserve_filters = True inlines = [] # Custom templates (designed to be over-ridden in subclasses) add_form_template = None change_form_template = None change_list_template = None delete_confirmation_template = None delete_selected_confirmation_template = None object_history_template = None # Actions actions = [] action_form = helpers.ActionForm actions_on_top = True actions_on_bottom = False actions_selection_counter = True # validation # Old, deprecated style: default_validator_class = validation.ModelAdminValidator # New style: checks_class = ModelAdminChecks def __init__(self, model, admin_site): self.model = model self.opts = model._meta self.admin_site = admin_site super(ModelAdmin, self).__init__() def __str__(self): return "%s.%s" % (self.model._meta.app_label, self.__class__.__name__) def get_inline_instances(self, request, obj=None): inline_instances = [] for inline_class in self.inlines: inline = inline_class(self.model, self.admin_site) if request: if not (inline.has_add_permission(request) or inline.has_change_permission(request, obj) or inline.has_delete_permission(request, obj)): continue if not inline.has_add_permission(request): inline.max_num = 0 inline_instances.append(inline) return inline_instances def get_urls(self): from django.conf.urls import patterns, url def wrap(view): def wrapper(*args, **kwargs): return self.admin_site.admin_view(view)(*args, **kwargs) return update_wrapper(wrapper, view) info = self.model._meta.app_label, self.model._meta.model_name urlpatterns = patterns('', url(r'^$', wrap(self.changelist_view), name='%s_%s_changelist' % info), url(r'^add/$', wrap(self.add_view), name='%s_%s_add' % info), url(r'^(.+)/history/$', wrap(self.history_view), name='%s_%s_history' % info), url(r'^(.+)/delete/$', wrap(self.delete_view), name='%s_%s_delete' % info), url(r'^(.+)/$', wrap(self.change_view), name='%s_%s_change' % info), ) return urlpatterns def urls(self): return self.get_urls() urls = property(urls) @property def media(self): extra = '' if settings.DEBUG else '.min' js = [ 'core.js', 'admin/RelatedObjectLookups.js', 'jquery%s.js' % extra, 'jquery.init.js' ] if self.actions is not None: js.append('actions%s.js' % extra) if self.prepopulated_fields: js.extend(['urlify.js', 'prepopulate%s.js' % extra]) return forms.Media(js=[static('admin/js/%s' % url) for url in js]) def get_model_perms(self, request): """ Returns a dict of all perms for this model. This dict has the keys ``add``, ``change``, and ``delete`` mapping to the True/False for each of those actions. """ return { 'add': self.has_add_permission(request), 'change': self.has_change_permission(request), 'delete': self.has_delete_permission(request), } def get_fields(self, request, obj=None): if self.fields: return self.fields form = self.get_form(request, obj, fields=None) return list(form.base_fields) + list(self.get_readonly_fields(request, obj)) def get_form(self, request, obj=None, **kwargs): """ Returns a Form class for use in the admin add view. This is used by add_view and change_view. 
""" if 'fields' in kwargs: fields = kwargs.pop('fields') else: fields = flatten_fieldsets(self.get_fieldsets(request, obj)) if self.exclude is None: exclude = [] else: exclude = list(self.exclude) exclude.extend(self.get_readonly_fields(request, obj)) if self.exclude is None and hasattr(self.form, '_meta') and self.form._meta.exclude: # Take the custom ModelForm's Meta.exclude into account only if the # ModelAdmin doesn't define its own. exclude.extend(self.form._meta.exclude) # if exclude is an empty list we pass None to be consistent with the # default on modelform_factory exclude = exclude or None defaults = { "form": self.form, "fields": fields, "exclude": exclude, "formfield_callback": partial(self.formfield_for_dbfield, request=request), } defaults.update(kwargs) if defaults['fields'] is None and not modelform_defines_fields(defaults['form']): defaults['fields'] = forms.ALL_FIELDS try: return modelform_factory(self.model, **defaults) except FieldError as e: raise FieldError('%s. Check fields/fieldsets/exclude attributes of class %s.' % (e, self.__class__.__name__)) def get_changelist(self, request, **kwargs): """ Returns the ChangeList class for use on the changelist page. """ from django.contrib.admin.views.main import ChangeList return ChangeList def get_object(self, request, object_id): """ Returns an instance matching the primary key provided. ``None`` is returned if no match is found (or the object_id failed validation against the primary key field). """ queryset = self.get_queryset(request) model = queryset.model try: object_id = model._meta.pk.to_python(object_id) return queryset.get(pk=object_id) except (model.DoesNotExist, ValidationError, ValueError): return None def get_changelist_form(self, request, **kwargs): """ Returns a Form class for use in the Formset on the changelist page. """ defaults = { "formfield_callback": partial(self.formfield_for_dbfield, request=request), } defaults.update(kwargs) if (defaults.get('fields') is None and not modelform_defines_fields(defaults.get('form'))): defaults['fields'] = forms.ALL_FIELDS return modelform_factory(self.model, **defaults) def get_changelist_formset(self, request, **kwargs): """ Returns a FormSet class for use on the changelist page if list_editable is used. """ defaults = { "formfield_callback": partial(self.formfield_for_dbfield, request=request), } defaults.update(kwargs) return modelformset_factory(self.model, self.get_changelist_form(request), extra=0, fields=self.list_editable, **defaults) def _get_formsets(self, request, obj): """ Helper function that exists to allow the deprecation warning to be executed while this function continues to return a generator. """ for inline in self.get_inline_instances(request, obj): yield inline.get_formset(request, obj) def get_formsets(self, request, obj=None): warnings.warn( "ModelAdmin.get_formsets() is deprecated and will be removed in " "Django 1.9. Use ModelAdmin.get_formsets_with_inlines() instead.", RemovedInDjango19Warning, stacklevel=2 ) return self._get_formsets(request, obj) def get_formsets_with_inlines(self, request, obj=None): """ Yields formsets and the corresponding inlines. """ # We call get_formsets() [deprecated] and check if it triggers a # warning. If it does, then it's ours and we can safely ignore it, but # if it doesn't then it has been overridden so we must warn about the # deprecation. 
with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") formsets = self.get_formsets(request, obj) if len(w) != 1 or not issubclass(w[0].category, RemovedInDjango19Warning): warnings.warn( "ModelAdmin.get_formsets() is deprecated and will be removed in " "Django 1.9. Use ModelAdmin.get_formsets_with_inlines() instead.", RemovedInDjango19Warning ) if formsets: zipped = zip(formsets, self.get_inline_instances(request, None)) for formset, inline in zipped: yield formset, inline else: for inline in self.get_inline_instances(request, obj): yield inline.get_formset(request, obj), inline def get_paginator(self, request, queryset, per_page, orphans=0, allow_empty_first_page=True): return self.paginator(queryset, per_page, orphans, allow_empty_first_page) def log_addition(self, request, object): """ Log that an object has been successfully added. The default implementation creates an admin LogEntry object. """ from django.contrib.admin.models import LogEntry, ADDITION LogEntry.objects.log_action( user_id=request.user.pk, content_type_id=get_content_type_for_model(object).pk, object_id=object.pk, object_repr=force_text(object), action_flag=ADDITION ) def log_change(self, request, object, message): """ Log that an object has been successfully changed. The default implementation creates an admin LogEntry object. """ from django.contrib.admin.models import LogEntry, CHANGE LogEntry.objects.log_action( user_id=request.user.pk, content_type_id=get_content_type_for_model(object).pk, object_id=object.pk, object_repr=force_text(object), action_flag=CHANGE, change_message=message ) def log_deletion(self, request, object, object_repr): """ Log that an object will be deleted. Note that this method must be called before the deletion. The default implementation creates an admin LogEntry object. """ from django.contrib.admin.models import LogEntry, DELETION LogEntry.objects.log_action( user_id=request.user.pk, content_type_id=get_content_type_for_model(object).pk, object_id=object.pk, object_repr=object_repr, action_flag=DELETION ) def action_checkbox(self, obj): """ A list_display column containing a checkbox widget. """ return helpers.checkbox.render(helpers.ACTION_CHECKBOX_NAME, force_text(obj.pk)) action_checkbox.short_description = mark_safe('<input type="checkbox" id="action-toggle" />') action_checkbox.allow_tags = True def get_actions(self, request): """ Return a dictionary mapping the names of all actions for this ModelAdmin to a tuple of (callable, name, description) for each action. """ # If self.actions is explicitly set to None that means that we don't # want *any* actions enabled on this page. from django.contrib.admin.views.main import _is_changelist_popup if self.actions is None or _is_changelist_popup(request): return OrderedDict() actions = [] # Gather actions from the admin site first for (name, func) in self.admin_site.actions: description = getattr(func, 'short_description', name.replace('_', ' ')) actions.append((func, name, description)) # Then gather them from the model admin and all parent classes, # starting with self and working back up. for klass in self.__class__.mro()[::-1]: class_actions = getattr(klass, 'actions', []) # Avoid trying to iterate over None if not class_actions: continue actions.extend(self.get_action(action) for action in class_actions) # get_action might have returned None, so filter any of those out. actions = filter(None, actions) # Convert the actions into an OrderedDict keyed by name. 
actions = OrderedDict( (name, (func, name, desc)) for func, name, desc in actions ) return actions def get_action_choices(self, request, default_choices=BLANK_CHOICE_DASH): """ Return a list of choices for use in a form object. Each choice is a tuple (name, description). """ choices = [] + default_choices for func, name, description in six.itervalues(self.get_actions(request)): choice = (name, description % model_format_dict(self.opts)) choices.append(choice) return choices def get_action(self, action): """ Return a given action from a parameter, which can either be a callable, or the name of a method on the ModelAdmin. Return is a tuple of (callable, name, description). """ # If the action is a callable, just use it. if callable(action): func = action action = action.__name__ # Next, look for a method. Grab it off self.__class__ to get an unbound # method instead of a bound one; this ensures that the calling # conventions are the same for functions and methods. elif hasattr(self.__class__, action): func = getattr(self.__class__, action) # Finally, look for a named method on the admin site else: try: func = self.admin_site.get_action(action) except KeyError: return None if hasattr(func, 'short_description'): description = func.short_description else: description = capfirst(action.replace('_', ' ')) return func, action, description def get_list_display(self, request): """ Return a sequence containing the fields to be displayed on the changelist. """ return self.list_display def get_list_display_links(self, request, list_display): """ Return a sequence containing the fields to be displayed as links on the changelist. The list_display parameter is the list of fields returned by get_list_display(). """ if self.list_display_links or self.list_display_links is None or not list_display: return self.list_display_links else: # Use only the first item in list_display as link return list(list_display)[:1] def get_list_filter(self, request): """ Returns a sequence containing the fields to be displayed as filters in the right sidebar of the changelist page. """ return self.list_filter def get_search_fields(self, request): """ Returns a sequence containing the fields to be searched whenever somebody submits a search query. """ return self.search_fields def get_search_results(self, request, queryset, search_term): """ Returns a tuple containing a queryset to implement the search, and a boolean indicating if the results may contain duplicates. """ # Apply keyword searches. def construct_search(field_name): if field_name.startswith('^'): return "%s__istartswith" % field_name[1:] elif field_name.startswith('='): return "%s__iexact" % field_name[1:] elif field_name.startswith('@'): return "%s__search" % field_name[1:] else: return "%s__icontains" % field_name use_distinct = False search_fields = self.get_search_fields(request) if search_fields and search_term: orm_lookups = [construct_search(str(search_field)) for search_field in search_fields] for bit in search_term.split(): or_queries = [models.Q(**{orm_lookup: bit}) for orm_lookup in orm_lookups] queryset = queryset.filter(reduce(operator.or_, or_queries)) if not use_distinct: for search_spec in orm_lookups: if lookup_needs_distinct(self.opts, search_spec): use_distinct = True break return queryset, use_distinct def get_preserved_filters(self, request): """ Returns the preserved filters querystring. 
""" match = request.resolver_match if self.preserve_filters and match: opts = self.model._meta current_url = '%s:%s' % (match.app_name, match.url_name) changelist_url = 'admin:%s_%s_changelist' % (opts.app_label, opts.model_name) if current_url == changelist_url: preserved_filters = request.GET.urlencode() else: preserved_filters = request.GET.get('_changelist_filters') if preserved_filters: return urlencode({'_changelist_filters': preserved_filters}) return '' def construct_change_message(self, request, form, formsets): """ Construct a change message from a changed object. """ change_message = [] if form.changed_data: change_message.append(_('Changed %s.') % get_text_list(form.changed_data, _('and'))) if formsets: for formset in formsets: for added_object in formset.new_objects: change_message.append(_('Added %(name)s "%(object)s".') % {'name': force_text(added_object._meta.verbose_name), 'object': force_text(added_object)}) for changed_object, changed_fields in formset.changed_objects: change_message.append(_('Changed %(list)s for %(name)s "%(object)s".') % {'list': get_text_list(changed_fields, _('and')), 'name': force_text(changed_object._meta.verbose_name), 'object': force_text(changed_object)}) for deleted_object in formset.deleted_objects: change_message.append(_('Deleted %(name)s "%(object)s".') % {'name': force_text(deleted_object._meta.verbose_name), 'object': force_text(deleted_object)}) change_message = ' '.join(change_message) return change_message or _('No fields changed.') def message_user(self, request, message, level=messages.INFO, extra_tags='', fail_silently=False): """ Send a message to the user. The default implementation posts a message using the django.contrib.messages backend. Exposes almost the same API as messages.add_message(), but accepts the positional arguments in a different order to maintain backwards compatibility. For convenience, it accepts the `level` argument as a string rather than the usual level number. """ if not isinstance(level, int): # attempt to get the level if passed a string try: level = getattr(messages.constants, level.upper()) except AttributeError: levels = messages.constants.DEFAULT_TAGS.values() levels_repr = ', '.join('`%s`' % l for l in levels) raise ValueError('Bad message level string: `%s`. ' 'Possible values are: %s' % (level, levels_repr)) messages.add_message(request, level, message, extra_tags=extra_tags, fail_silently=fail_silently) def save_form(self, request, form, change): """ Given a ModelForm return an unsaved instance. ``change`` is True if the object is being changed, and False if it's being added. """ return form.save(commit=False) def save_model(self, request, obj, form, change): """ Given a model instance save it to the database. """ obj.save() def delete_model(self, request, obj): """ Given a model instance delete it from the database. """ obj.delete() def save_formset(self, request, form, formset, change): """ Given an inline formset save it to the database. """ formset.save() def save_related(self, request, form, formsets, change): """ Given the ``HttpRequest``, the parent ``ModelForm`` instance, the list of inline formsets and a boolean value based on whether the parent is being added or changed, save the related objects to the database. Note that at this point save_form() and save_model() have already been called. 
""" form.save_m2m() for formset in formsets: self.save_formset(request, form, formset, change=change) def render_change_form(self, request, context, add=False, change=False, form_url='', obj=None): opts = self.model._meta app_label = opts.app_label preserved_filters = self.get_preserved_filters(request) form_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, form_url) view_on_site_url = self.get_view_on_site_url(obj) context.update({ 'add': add, 'change': change, 'has_add_permission': self.has_add_permission(request), 'has_change_permission': self.has_change_permission(request, obj), 'has_delete_permission': self.has_delete_permission(request, obj), 'has_file_field': True, # FIXME - this should check if form or formsets have a FileField, 'has_absolute_url': view_on_site_url is not None, 'absolute_url': view_on_site_url, 'form_url': form_url, 'opts': opts, 'content_type_id': get_content_type_for_model(self.model).pk, 'save_as': self.save_as, 'save_on_top': self.save_on_top, 'to_field_var': TO_FIELD_VAR, 'is_popup_var': IS_POPUP_VAR, 'app_label': app_label, }) if add and self.add_form_template is not None: form_template = self.add_form_template else: form_template = self.change_form_template return TemplateResponse(request, form_template or [ "admin/%s/%s/change_form.html" % (app_label, opts.model_name), "admin/%s/change_form.html" % app_label, "admin/change_form.html" ], context, current_app=self.admin_site.name) def response_add(self, request, obj, post_url_continue=None): """ Determines the HttpResponse for the add_view stage. """ opts = obj._meta pk_value = obj._get_pk_val() preserved_filters = self.get_preserved_filters(request) msg_dict = {'name': force_text(opts.verbose_name), 'obj': force_text(obj)} # Here, we distinguish between different save types by checking for # the presence of keys in request.POST. if IS_POPUP_VAR in request.POST: to_field = request.POST.get(TO_FIELD_VAR) if to_field: attr = str(to_field) else: attr = obj._meta.pk.attname value = obj.serializable_value(attr) return SimpleTemplateResponse('admin/popup_response.html', { 'pk_value': escape(pk_value), # for possible backwards-compatibility 'value': escape(value), 'obj': escapejs(obj) }) elif "_continue" in request.POST: msg = _('The %(name)s "%(obj)s" was added successfully. You may edit it again below.') % msg_dict self.message_user(request, msg, messages.SUCCESS) if post_url_continue is None: post_url_continue = reverse('admin:%s_%s_change' % (opts.app_label, opts.model_name), args=(quote(pk_value),), current_app=self.admin_site.name) post_url_continue = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, post_url_continue) return HttpResponseRedirect(post_url_continue) elif "_addanother" in request.POST: msg = _('The %(name)s "%(obj)s" was added successfully. You may add another %(name)s below.') % msg_dict self.message_user(request, msg, messages.SUCCESS) redirect_url = request.path redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url) return HttpResponseRedirect(redirect_url) else: msg = _('The %(name)s "%(obj)s" was added successfully.') % msg_dict self.message_user(request, msg, messages.SUCCESS) return self.response_post_save_add(request, obj) def response_change(self, request, obj): """ Determines the HttpResponse for the change_view stage. 
""" opts = self.model._meta pk_value = obj._get_pk_val() preserved_filters = self.get_preserved_filters(request) msg_dict = {'name': force_text(opts.verbose_name), 'obj': force_text(obj)} if "_continue" in request.POST: msg = _('The %(name)s "%(obj)s" was changed successfully. You may edit it again below.') % msg_dict self.message_user(request, msg, messages.SUCCESS) redirect_url = request.path redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url) return HttpResponseRedirect(redirect_url) elif "_saveasnew" in request.POST: msg = _('The %(name)s "%(obj)s" was added successfully. You may edit it again below.') % msg_dict self.message_user(request, msg, messages.SUCCESS) redirect_url = reverse('admin:%s_%s_change' % (opts.app_label, opts.model_name), args=(pk_value,), current_app=self.admin_site.name) redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url) return HttpResponseRedirect(redirect_url) elif "_addanother" in request.POST: msg = _('The %(name)s "%(obj)s" was changed successfully. You may add another %(name)s below.') % msg_dict self.message_user(request, msg, messages.SUCCESS) redirect_url = reverse('admin:%s_%s_add' % (opts.app_label, opts.model_name), current_app=self.admin_site.name) redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url) return HttpResponseRedirect(redirect_url) else: msg = _('The %(name)s "%(obj)s" was changed successfully.') % msg_dict self.message_user(request, msg, messages.SUCCESS) return self.response_post_save_change(request, obj) def response_post_save_add(self, request, obj): """ Figure out where to redirect after the 'Save' button has been pressed when adding a new object. """ opts = self.model._meta if self.has_change_permission(request, None): post_url = reverse('admin:%s_%s_changelist' % (opts.app_label, opts.model_name), current_app=self.admin_site.name) preserved_filters = self.get_preserved_filters(request) post_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, post_url) else: post_url = reverse('admin:index', current_app=self.admin_site.name) return HttpResponseRedirect(post_url) def response_post_save_change(self, request, obj): """ Figure out where to redirect after the 'Save' button has been pressed when editing an existing object. """ opts = self.model._meta if self.has_change_permission(request, None): post_url = reverse('admin:%s_%s_changelist' % (opts.app_label, opts.model_name), current_app=self.admin_site.name) preserved_filters = self.get_preserved_filters(request) post_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, post_url) else: post_url = reverse('admin:index', current_app=self.admin_site.name) return HttpResponseRedirect(post_url) def response_action(self, request, queryset): """ Handle an admin action. This is called if a request is POSTed to the changelist; it returns an HttpResponse if the action was handled, and None otherwise. """ # There can be multiple action forms on the page (at the top # and bottom of the change list, for example). Get the action # whose button was pushed. try: action_index = int(request.POST.get('index', 0)) except ValueError: action_index = 0 # Construct the action form. 
data = request.POST.copy() data.pop(helpers.ACTION_CHECKBOX_NAME, None) data.pop("index", None) # Use the action whose button was pushed try: data.update({'action': data.getlist('action')[action_index]}) except IndexError: # If we didn't get an action from the chosen form that's invalid # POST data, so by deleting action it'll fail the validation check # below. So no need to do anything here pass action_form = self.action_form(data, auto_id=None) action_form.fields['action'].choices = self.get_action_choices(request) # If the form's valid we can handle the action. if action_form.is_valid(): action = action_form.cleaned_data['action'] select_across = action_form.cleaned_data['select_across'] func = self.get_actions(request)[action][0] # Get the list of selected PKs. If nothing's selected, we can't # perform an action on it, so bail. Except we want to perform # the action explicitly on all objects. selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME) if not selected and not select_across: # Reminder that something needs to be selected or nothing will happen msg = _("Items must be selected in order to perform " "actions on them. No items have been changed.") self.message_user(request, msg, messages.WARNING) return None if not select_across: # Perform the action only on the selected objects queryset = queryset.filter(pk__in=selected) response = func(self, request, queryset) # Actions may return an HttpResponse-like object, which will be # used as the response from the POST. If not, we'll be a good # little HTTP citizen and redirect back to the changelist page. if isinstance(response, HttpResponseBase): return response else: return HttpResponseRedirect(request.get_full_path()) else: msg = _("No action selected.") self.message_user(request, msg, messages.WARNING) return None def response_delete(self, request, obj_display): """ Determines the HttpResponse for the delete_view stage. """ opts = self.model._meta self.message_user(request, _('The %(name)s "%(obj)s" was deleted successfully.') % { 'name': force_text(opts.verbose_name), 'obj': force_text(obj_display) }, messages.SUCCESS) if self.has_change_permission(request, None): post_url = reverse('admin:%s_%s_changelist' % (opts.app_label, opts.model_name), current_app=self.admin_site.name) preserved_filters = self.get_preserved_filters(request) post_url = add_preserved_filters( {'preserved_filters': preserved_filters, 'opts': opts}, post_url ) else: post_url = reverse('admin:index', current_app=self.admin_site.name) return HttpResponseRedirect(post_url) def render_delete_form(self, request, context): opts = self.model._meta app_label = opts.app_label return TemplateResponse(request, self.delete_confirmation_template or [ "admin/{}/{}/delete_confirmation.html".format(app_label, opts.model_name), "admin/{}/delete_confirmation.html".format(app_label), "admin/delete_confirmation.html" ], context, current_app=self.admin_site.name) def get_inline_formsets(self, request, formsets, inline_instances, obj=None): inline_admin_formsets = [] for inline, formset in zip(inline_instances, formsets): fieldsets = list(inline.get_fieldsets(request, obj)) readonly = list(inline.get_readonly_fields(request, obj)) prepopulated = dict(inline.get_prepopulated_fields(request, obj)) inline_admin_formset = helpers.InlineAdminFormSet(inline, formset, fieldsets, prepopulated, readonly, model_admin=self) inline_admin_formsets.append(inline_admin_formset) return inline_admin_formsets def get_changeform_initial_data(self, request): """ Get the initial form data. 
Unless overridden, this populates from the GET params. """ initial = dict(request.GET.items()) for k in initial: try: f = self.model._meta.get_field(k) except models.FieldDoesNotExist: continue # We have to special-case M2Ms as a list of comma-separated PKs. if isinstance(f, models.ManyToManyField): initial[k] = initial[k].split(",") return initial @csrf_protect_m @transaction.atomic def changeform_view(self, request, object_id=None, form_url='', extra_context=None): model = self.model opts = model._meta add = object_id is None if add: if not self.has_add_permission(request): raise PermissionDenied obj = None else: obj = self.get_object(request, unquote(object_id)) if not self.has_change_permission(request, obj): raise PermissionDenied if obj is None: raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % { 'name': force_text(opts.verbose_name), 'key': escape(object_id)}) if request.method == 'POST' and "_saveasnew" in request.POST: return self.add_view(request, form_url=reverse('admin:%s_%s_add' % ( opts.app_label, opts.model_name), current_app=self.admin_site.name)) ModelForm = self.get_form(request, obj) if request.method == 'POST': form = ModelForm(request.POST, request.FILES, instance=obj) if form.is_valid(): form_validated = True new_object = self.save_form(request, form, change=not add) else: form_validated = False new_object = form.instance formsets, inline_instances = self._create_formsets(request, new_object) if all_valid(formsets) and form_validated: self.save_model(request, new_object, form, not add) self.save_related(request, form, formsets, not add) if add: self.log_addition(request, new_object) return self.response_add(request, new_object) else: change_message = self.construct_change_message(request, form, formsets) self.log_change(request, new_object, change_message) return self.response_change(request, new_object) else: if add: initial = self.get_changeform_initial_data(request) form = ModelForm(initial=initial) formsets, inline_instances = self._create_formsets(request, self.model()) else: form = ModelForm(instance=obj) formsets, inline_instances = self._create_formsets(request, obj) adminForm = helpers.AdminForm( form, list(self.get_fieldsets(request, obj)), self.get_prepopulated_fields(request, obj), self.get_readonly_fields(request, obj), model_admin=self) media = self.media + adminForm.media inline_formsets = self.get_inline_formsets(request, formsets, inline_instances, obj) for inline_formset in inline_formsets: media = media + inline_formset.media context = dict(self.admin_site.each_context(), title=(_('Add %s') if add else _('Change %s')) % force_text(opts.verbose_name), adminform=adminForm, object_id=object_id, original=obj, is_popup=(IS_POPUP_VAR in request.POST or IS_POPUP_VAR in request.GET), to_field=request.POST.get(TO_FIELD_VAR, request.GET.get(TO_FIELD_VAR)), media=media, inline_admin_formsets=inline_formsets, errors=helpers.AdminErrorList(form, formsets), preserved_filters=self.get_preserved_filters(request), ) context.update(extra_context or {}) return self.render_change_form(request, context, add=add, change=not add, obj=obj, form_url=form_url) def add_view(self, request, form_url='', extra_context=None): return self.changeform_view(request, None, form_url, extra_context) def change_view(self, request, object_id, form_url='', extra_context=None): return self.changeform_view(request, object_id, form_url, extra_context) @csrf_protect_m def changelist_view(self, request, extra_context=None): """ The 'change list' admin view for this 
model. """ from django.contrib.admin.views.main import ERROR_FLAG opts = self.model._meta app_label = opts.app_label if not self.has_change_permission(request, None): raise PermissionDenied list_display = self.get_list_display(request) list_display_links = self.get_list_display_links(request, list_display) list_filter = self.get_list_filter(request) search_fields = self.get_search_fields(request) # Check actions to see if any are available on this changelist actions = self.get_actions(request) if actions: # Add the action checkboxes if there are any actions available. list_display = ['action_checkbox'] + list(list_display) ChangeList = self.get_changelist(request) try: cl = ChangeList(request, self.model, list_display, list_display_links, list_filter, self.date_hierarchy, search_fields, self.list_select_related, self.list_per_page, self.list_max_show_all, self.list_editable, self) except IncorrectLookupParameters: # Wacky lookup parameters were given, so redirect to the main # changelist page, without parameters, and pass an 'invalid=1' # parameter via the query string. If wacky parameters were given # and the 'invalid=1' parameter was already in the query string, # something is screwed up with the database, so display an error # page. if ERROR_FLAG in request.GET.keys(): return SimpleTemplateResponse('admin/invalid_setup.html', { 'title': _('Database error'), }) return HttpResponseRedirect(request.path + '?' + ERROR_FLAG + '=1') # If the request was POSTed, this might be a bulk action or a bulk # edit. Try to look up an action or confirmation first, but if this # isn't an action the POST will fall through to the bulk edit check, # below. action_failed = False selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME) # Actions with no confirmation if (actions and request.method == 'POST' and 'index' in request.POST and '_save' not in request.POST): if selected: response = self.response_action(request, queryset=cl.get_queryset(request)) if response: return response else: action_failed = True else: msg = _("Items must be selected in order to perform " "actions on them. No items have been changed.") self.message_user(request, msg, messages.WARNING) action_failed = True # Actions with confirmation if (actions and request.method == 'POST' and helpers.ACTION_CHECKBOX_NAME in request.POST and 'index' not in request.POST and '_save' not in request.POST): if selected: response = self.response_action(request, queryset=cl.get_queryset(request)) if response: return response else: action_failed = True # If we're allowing changelist editing, we need to construct a formset # for the changelist given all the fields to be edited. Then we'll # use the formset to validate/process POSTed data. formset = cl.formset = None # Handle POSTed bulk-edit data. 
if (request.method == "POST" and cl.list_editable and '_save' in request.POST and not action_failed): FormSet = self.get_changelist_formset(request) formset = cl.formset = FormSet(request.POST, request.FILES, queryset=cl.result_list) if formset.is_valid(): changecount = 0 for form in formset.forms: if form.has_changed(): obj = self.save_form(request, form, change=True) self.save_model(request, obj, form, change=True) self.save_related(request, form, formsets=[], change=True) change_msg = self.construct_change_message(request, form, None) self.log_change(request, obj, change_msg) changecount += 1 if changecount: if changecount == 1: name = force_text(opts.verbose_name) else: name = force_text(opts.verbose_name_plural) msg = ungettext("%(count)s %(name)s was changed successfully.", "%(count)s %(name)s were changed successfully.", changecount) % {'count': changecount, 'name': name, 'obj': force_text(obj)} self.message_user(request, msg, messages.SUCCESS) return HttpResponseRedirect(request.get_full_path()) # Handle GET -- construct a formset for display. elif cl.list_editable: FormSet = self.get_changelist_formset(request) formset = cl.formset = FormSet(queryset=cl.result_list) # Build the list of media to be used by the formset. if formset: media = self.media + formset.media else: media = self.media # Build the action form and populate it with available actions. if actions: action_form = self.action_form(auto_id=None) action_form.fields['action'].choices = self.get_action_choices(request) else: action_form = None selection_note_all = ungettext('%(total_count)s selected', 'All %(total_count)s selected', cl.result_count) context = dict( self.admin_site.each_context(), module_name=force_text(opts.verbose_name_plural), selection_note=_('0 of %(cnt)s selected') % {'cnt': len(cl.result_list)}, selection_note_all=selection_note_all % {'total_count': cl.result_count}, title=cl.title, is_popup=cl.is_popup, to_field=cl.to_field, cl=cl, media=media, has_add_permission=self.has_add_permission(request), opts=cl.opts, action_form=action_form, actions_on_top=self.actions_on_top, actions_on_bottom=self.actions_on_bottom, actions_selection_counter=self.actions_selection_counter, preserved_filters=self.get_preserved_filters(request), ) context.update(extra_context or {}) return TemplateResponse(request, self.change_list_template or [ 'admin/%s/%s/change_list.html' % (app_label, opts.model_name), 'admin/%s/change_list.html' % app_label, 'admin/change_list.html' ], context, current_app=self.admin_site.name) @csrf_protect_m @transaction.atomic def delete_view(self, request, object_id, extra_context=None): "The 'delete' admin view for this model." opts = self.model._meta app_label = opts.app_label obj = self.get_object(request, unquote(object_id)) if not self.has_delete_permission(request, obj): raise PermissionDenied if obj is None: raise Http404( _('%(name)s object with primary key %(key)r does not exist.') % {'name': force_text(opts.verbose_name), 'key': escape(object_id)} ) using = router.db_for_write(self.model) # Populate deleted_objects, a data structure of all related objects that # will also be deleted. (deleted_objects, perms_needed, protected) = get_deleted_objects( [obj], opts, request.user, self.admin_site, using) if request.POST: # The user has already confirmed the deletion. 
if perms_needed: raise PermissionDenied obj_display = force_text(obj) self.log_deletion(request, obj, obj_display) self.delete_model(request, obj) return self.response_delete(request, obj_display) object_name = force_text(opts.verbose_name) if perms_needed or protected: title = _("Cannot delete %(name)s") % {"name": object_name} else: title = _("Are you sure?") context = dict( self.admin_site.each_context(), title=title, object_name=object_name, object=obj, deleted_objects=deleted_objects, perms_lacking=perms_needed, protected=protected, opts=opts, app_label=app_label, preserved_filters=self.get_preserved_filters(request), ) context.update(extra_context or {}) return self.render_delete_form(request, context) def history_view(self, request, object_id, extra_context=None): "The 'history' admin view for this model." from django.contrib.admin.models import LogEntry # First check if the user can see this history. model = self.model obj = get_object_or_404(self.get_queryset(request), pk=unquote(object_id)) if not self.has_change_permission(request, obj): raise PermissionDenied # Then get the history for this object. opts = model._meta app_label = opts.app_label action_list = LogEntry.objects.filter( object_id=unquote(object_id), content_type=get_content_type_for_model(model) ).select_related().order_by('action_time') context = dict(self.admin_site.each_context(), title=_('Change history: %s') % force_text(obj), action_list=action_list, module_name=capfirst(force_text(opts.verbose_name_plural)), object=obj, opts=opts, preserved_filters=self.get_preserved_filters(request), ) context.update(extra_context or {}) return TemplateResponse(request, self.object_history_template or [ "admin/%s/%s/object_history.html" % (app_label, opts.model_name), "admin/%s/object_history.html" % app_label, "admin/object_history.html" ], context, current_app=self.admin_site.name) def _create_formsets(self, request, obj): "Helper function to generate formsets for add/change_view." formsets = [] inline_instances = [] prefixes = {} get_formsets_args = [request] if obj.pk: get_formsets_args.append(obj) for FormSet, inline in self.get_formsets_with_inlines(*get_formsets_args): prefix = FormSet.get_default_prefix() prefixes[prefix] = prefixes.get(prefix, 0) + 1 if prefixes[prefix] != 1 or not prefix: prefix = "%s-%s" % (prefix, prefixes[prefix]) formset_params = { 'instance': obj, 'prefix': prefix, 'queryset': inline.get_queryset(request), } if request.method == 'POST': formset_params.update({ 'data': request.POST, 'files': request.FILES, 'save_as_new': '_saveasnew' in request.POST }) formsets.append(FormSet(**formset_params)) inline_instances.append(inline) return formsets, inline_instances class InlineModelAdmin(BaseModelAdmin): """ Options for inline editing of ``model`` instances. Provide ``fk_name`` to specify the attribute name of the ``ForeignKey`` from ``model`` to its parent. This is required if ``model`` has more than one ``ForeignKey`` to its parent. 
""" model = None fk_name = None formset = BaseInlineFormSet extra = 3 min_num = None max_num = None template = None verbose_name = None verbose_name_plural = None can_delete = True checks_class = InlineModelAdminChecks def __init__(self, parent_model, admin_site): self.admin_site = admin_site self.parent_model = parent_model self.opts = self.model._meta super(InlineModelAdmin, self).__init__() if self.verbose_name is None: self.verbose_name = self.model._meta.verbose_name if self.verbose_name_plural is None: self.verbose_name_plural = self.model._meta.verbose_name_plural @property def media(self): extra = '' if settings.DEBUG else '.min' js = ['jquery%s.js' % extra, 'jquery.init.js', 'inlines%s.js' % extra] if self.prepopulated_fields: js.extend(['urlify.js', 'prepopulate%s.js' % extra]) if self.filter_vertical or self.filter_horizontal: js.extend(['SelectBox.js', 'SelectFilter2.js']) return forms.Media(js=[static('admin/js/%s' % url) for url in js]) def get_extra(self, request, obj=None, **kwargs): """Hook for customizing the number of extra inline forms.""" return self.extra def get_min_num(self, request, obj=None, **kwargs): """Hook for customizing the min number of inline forms.""" return self.min_num def get_max_num(self, request, obj=None, **kwargs): """Hook for customizing the max number of extra inline forms.""" return self.max_num def get_formset(self, request, obj=None, **kwargs): """Returns a BaseInlineFormSet class for use in admin add/change views.""" if 'fields' in kwargs: fields = kwargs.pop('fields') else: fields = flatten_fieldsets(self.get_fieldsets(request, obj)) if self.exclude is None: exclude = [] else: exclude = list(self.exclude) exclude.extend(self.get_readonly_fields(request, obj)) if self.exclude is None and hasattr(self.form, '_meta') and self.form._meta.exclude: # Take the custom ModelForm's Meta.exclude into account only if the # InlineModelAdmin doesn't define its own. exclude.extend(self.form._meta.exclude) # If exclude is an empty list we use None, since that's the actual # default. exclude = exclude or None can_delete = self.can_delete and self.has_delete_permission(request, obj) defaults = { "form": self.form, "formset": self.formset, "fk_name": self.fk_name, "fields": fields, "exclude": exclude, "formfield_callback": partial(self.formfield_for_dbfield, request=request), "extra": self.get_extra(request, obj, **kwargs), "min_num": self.get_min_num(request, obj, **kwargs), "max_num": self.get_max_num(request, obj, **kwargs), "can_delete": can_delete, } defaults.update(kwargs) base_model_form = defaults['form'] class DeleteProtectedModelForm(base_model_form): def hand_clean_DELETE(self): """ We don't validate the 'DELETE' field itself because on templates it's not rendered using the field information, but just using a generic "deletion_field" of the InlineModelAdmin. 
""" if self.cleaned_data.get(DELETION_FIELD_NAME, False): using = router.db_for_write(self._meta.model) collector = NestedObjects(using=using) collector.collect([self.instance]) if collector.protected: objs = [] for p in collector.protected: objs.append( # Translators: Model verbose name and instance representation, suitable to be an item in a list _('%(class_name)s %(instance)s') % { 'class_name': p._meta.verbose_name, 'instance': p} ) params = {'class_name': self._meta.model._meta.verbose_name, 'instance': self.instance, 'related_objects': get_text_list(objs, _('and'))} msg = _("Deleting %(class_name)s %(instance)s would require " "deleting the following protected related objects: " "%(related_objects)s") raise ValidationError(msg, code='deleting_protected', params=params) def is_valid(self): result = super(DeleteProtectedModelForm, self).is_valid() self.hand_clean_DELETE() return result defaults['form'] = DeleteProtectedModelForm if defaults['fields'] is None and not modelform_defines_fields(defaults['form']): defaults['fields'] = forms.ALL_FIELDS return inlineformset_factory(self.parent_model, self.model, **defaults) def get_fields(self, request, obj=None): if self.fields: return self.fields form = self.get_formset(request, obj, fields=None).form return list(form.base_fields) + list(self.get_readonly_fields(request, obj)) def get_queryset(self, request): queryset = super(InlineModelAdmin, self).get_queryset(request) if not self.has_change_permission(request): queryset = queryset.none() return queryset def has_add_permission(self, request): if self.opts.auto_created: # We're checking the rights to an auto-created intermediate model, # which doesn't have its own individual permissions. The user needs # to have the change permission for the related model in order to # be able to do anything with the intermediate model. return self.has_change_permission(request) return super(InlineModelAdmin, self).has_add_permission(request) def has_change_permission(self, request, obj=None): opts = self.opts if opts.auto_created: # The model was auto-created as intermediary for a # ManyToMany-relationship, find the target model for field in opts.fields: if field.rel and field.rel.to != self.parent_model: opts = field.rel.to._meta break codename = get_permission_codename('change', opts) return request.user.has_perm("%s.%s" % (opts.app_label, codename)) def has_delete_permission(self, request, obj=None): if self.opts.auto_created: # We're checking the rights to an auto-created intermediate model, # which doesn't have its own individual permissions. The user needs # to have the change permission for the related model in order to # be able to do anything with the intermediate model. return self.has_change_permission(request, obj) return super(InlineModelAdmin, self).has_delete_permission(request, obj) class StackedInline(InlineModelAdmin): template = 'admin/edit_inline/stacked.html' class TabularInline(InlineModelAdmin): template = 'admin/edit_inline/tabular.html'
./CrossVul/dataset_final_sorted/CWE-264/py/bad_2042_1
crossvul-python_data_good_1801_0
# -*- coding: utf-8 -*- # # This file is part of Radicale Server - Calendar Server # Copyright © 2008 Nicolas Kandel # Copyright © 2008 Pascal Halter # Copyright © 2008-2013 Guillaume Ayoub # # This library is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Radicale. If not, see <http://www.gnu.org/licenses/>. """ Rights management. Rights are based on a regex-based file whose name is specified in the config (section "right", key "file"). Authentication login is matched against the "user" key, and collection's path is matched against the "collection" key. You can use Python's ConfigParser interpolation values %(login)s and %(path)s. You can also get groups from the user regex in the collection with {0}, {1}, etc. For example, for the "user" key, ".+" means "authenticated user" and ".*" means "anybody" (including anonymous users). Section names are only used for naming the rule. Leading or ending slashes are trimmed from collection's path. """ import re import sys import os.path from .. import config, log # Manage Python2/3 different modules if sys.version_info[0] == 2: from ConfigParser import ConfigParser from StringIO import StringIO else: from configparser import ConfigParser from io import StringIO DEFINED_RIGHTS = { "authenticated": "[rw]\nuser:.+\ncollection:.*\npermission:rw", "owner_write": "[r]\nuser:.+\ncollection:.*\npermission:r\n" "[w]\nuser:.+\ncollection:^%(login)s(/.*)?$\npermission:w", "owner_only": "[rw]\nuser:.+\ncollection:^%(login)s(/.*)?$\npermission:rw", } def _read_from_sections(user, collection_url, permission): """Get regex sections.""" filename = os.path.expanduser(config.get("rights", "file")) rights_type = config.get("rights", "type").lower() # Prevent "regex injection" user_escaped = re.escape(user) collection_url_escaped = re.escape(collection_url) regex = ConfigParser({"login": user_escaped, "path": collection_url_escaped}) if rights_type in DEFINED_RIGHTS: log.LOGGER.debug("Rights type '%s'" % rights_type) regex.readfp(StringIO(DEFINED_RIGHTS[rights_type])) elif rights_type == "from_file": log.LOGGER.debug("Reading rights from file %s" % filename) if not regex.read(filename): log.LOGGER.error("File '%s' not found for rights" % filename) return False else: log.LOGGER.error("Unknown rights type '%s'" % rights_type) return False for section in regex.sections(): re_user = regex.get(section, "user") re_collection = regex.get(section, "collection") log.LOGGER.debug( "Test if '%s:%s' matches against '%s:%s' from section '%s'" % ( user, collection_url, re_user, re_collection, section)) user_match = re.match(re_user, user) if user_match: re_collection = re_collection.format(*user_match.groups()) if re.match(re_collection, collection_url): log.LOGGER.debug("Section '%s' matches" % section) if permission in regex.get(section, "permission"): return True else: log.LOGGER.debug("Section '%s' does not match" % section) return False def authorized(user, collection, permission): """Check if the user is allowed to read or write the collection. 
If the user is empty, anonymous rights are checked. """ collection_url = collection.url.rstrip("/") or "/" if collection_url in (".well-known/carddav", ".well-known/caldav"): return permission == "r" rights_type = config.get("rights", "type").lower() return ( rights_type == "none" or _read_from_sections(user or "", collection_url, permission))
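# --- Editor's illustrative sketch (not part of the original file) ---
# A hypothetical rights file of the kind _read_from_sections() parses when
# the config has rights.type = from_file. The section names and the
# "public" path are invented for illustration; the user/collection/
# permission keys, the %(login)s interpolation, and the regex matching are
# exactly what the code above handles (compare DEFINED_RIGHTS):
#
#   [owner-full-access]
#   user: .+
#   collection: ^%(login)s(/.*)?$
#   permission: rw
#
#   [public-read-only]
#   user: .*
#   collection: ^public(/.*)?$
#   permission: r
#
# A caller (e.g. the request handler) would then gate access roughly like
# this, where `collection` is assumed to expose a .url attribute and
# respond_forbidden() is a hypothetical helper:
#
#   if not authorized(user, collection, "r"):
#       return respond_forbidden()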
./CrossVul/dataset_final_sorted/CWE-264/py/good_1801_0
crossvul-python_data_bad_3633_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Cloud Controller: Implementation of EC2 REST API calls, which are dispatched to other nodes via AMQP RPC. State is via distributed datastore. """ import base64 import os import re import shutil import tempfile import time import urllib from nova import block_device from nova import compute from nova import context from nova import crypto from nova import db from nova import exception from nova import flags from nova import ipv6 from nova import log as logging from nova import network from nova import rpc from nova import utils from nova import volume from nova.api.ec2 import ec2utils from nova.compute import instance_types from nova.compute import vm_states from nova.image import s3 FLAGS = flags.FLAGS flags.DECLARE('dhcp_domain', 'nova.network.manager') flags.DECLARE('service_down_time', 'nova.scheduler.driver') LOG = logging.getLogger("nova.api.cloud") def _gen_key(context, user_id, key_name): """Generate a key This is a module level method because it is slow and we need to defer it into a process pool.""" # NOTE(vish): generating key pair is slow so check for legal # creation before creating key_pair try: db.key_pair_get(context, user_id, key_name) raise exception.KeyPairExists(key_name=key_name) except exception.NotFound: pass private_key, public_key, fingerprint = crypto.generate_key_pair() key = {} key['user_id'] = user_id key['name'] = key_name key['public_key'] = public_key key['fingerprint'] = fingerprint db.key_pair_create(context, key) return {'private_key': private_key, 'fingerprint': fingerprint} # EC2 API can return the following values as documented in the EC2 API # http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ # ApiReference-ItemType-InstanceStateType.html # pending | running | shutting-down | terminated | stopping | stopped _STATE_DESCRIPTION_MAP = { None: 'pending', vm_states.ACTIVE: 'running', vm_states.BUILDING: 'pending', vm_states.REBUILDING: 'pending', vm_states.DELETED: 'terminated', vm_states.STOPPED: 'stopped', vm_states.MIGRATING: 'migrate', vm_states.RESIZING: 'resize', vm_states.PAUSED: 'pause', vm_states.SUSPENDED: 'suspend', vm_states.RESCUED: 'rescue', } def state_description_from_vm_state(vm_state): """Map the vm state to the server status string""" return _STATE_DESCRIPTION_MAP.get(vm_state, vm_state) # TODO(yamahata): hypervisor dependent default device name _DEFAULT_ROOT_DEVICE_NAME = '/dev/sda1' _DEFAULT_MAPPINGS = {'ami': 'sda1', 'ephemeral0': 'sda2', 'root': _DEFAULT_ROOT_DEVICE_NAME, 'swap': 'sda3'} def _parse_block_device_mapping(bdm): """Parse BlockDeviceMappingItemType into flat hash BlockDevicedMapping.<N>.DeviceName BlockDevicedMapping.<N>.Ebs.SnapshotId BlockDevicedMapping.<N>.Ebs.VolumeSize BlockDevicedMapping.<N>.Ebs.DeleteOnTermination BlockDevicedMapping.<N>.Ebs.NoDevice 
BlockDevicedMapping.<N>.VirtualName => remove .Ebs and allow volume id in SnapshotId """ ebs = bdm.pop('ebs', None) if ebs: ec2_id = ebs.pop('snapshot_id', None) if ec2_id: id = ec2utils.ec2_id_to_id(ec2_id) if ec2_id.startswith('snap-'): bdm['snapshot_id'] = id elif ec2_id.startswith('vol-'): bdm['volume_id'] = id ebs.setdefault('delete_on_termination', True) bdm.update(ebs) return bdm def _properties_get_mappings(properties): return block_device.mappings_prepend_dev(properties.get('mappings', [])) def _format_block_device_mapping(bdm): """Contruct BlockDeviceMappingItemType {'device_name': '...', 'snapshot_id': , ...} => BlockDeviceMappingItemType """ keys = (('deviceName', 'device_name'), ('virtualName', 'virtual_name')) item = {} for name, k in keys: if k in bdm: item[name] = bdm[k] if bdm.get('no_device'): item['noDevice'] = True if ('snapshot_id' in bdm) or ('volume_id' in bdm): ebs_keys = (('snapshotId', 'snapshot_id'), ('snapshotId', 'volume_id'), # snapshotId is abused ('volumeSize', 'volume_size'), ('deleteOnTermination', 'delete_on_termination')) ebs = {} for name, k in ebs_keys: if k in bdm: if k == 'snapshot_id': ebs[name] = ec2utils.id_to_ec2_snap_id(bdm[k]) elif k == 'volume_id': ebs[name] = ec2utils.id_to_ec2_vol_id(bdm[k]) else: ebs[name] = bdm[k] assert 'snapshotId' in ebs item['ebs'] = ebs return item def _format_mappings(properties, result): """Format multiple BlockDeviceMappingItemType""" mappings = [{'virtualName': m['virtual'], 'deviceName': m['device']} for m in _properties_get_mappings(properties) if block_device.is_swap_or_ephemeral(m['virtual'])] block_device_mapping = [_format_block_device_mapping(bdm) for bdm in properties.get('block_device_mapping', [])] # NOTE(yamahata): overwrite mappings with block_device_mapping for bdm in block_device_mapping: for i in range(len(mappings)): if bdm['deviceName'] == mappings[i]['deviceName']: del mappings[i] break mappings.append(bdm) # NOTE(yamahata): trim ebs.no_device == true. Is this necessary? mappings = [bdm for bdm in mappings if not (bdm.get('noDevice', False))] if mappings: result['blockDeviceMapping'] = mappings class CloudController(object): """ CloudController provides the critical dispatch between inbound API calls through the endpoint and messages sent to the other nodes. """ def __init__(self): self.image_service = s3.S3ImageService() self.network_api = network.API() self.volume_api = volume.API() self.compute_api = compute.API( network_api=self.network_api, volume_api=self.volume_api) self.setup() def __str__(self): return 'CloudController' def setup(self): """ Ensure the keychains and folders exist. 
""" # FIXME(ja): this should be moved to a nova-manage command, # if not setup throw exceptions instead of running # Create keys folder, if it doesn't exist if not os.path.exists(FLAGS.keys_path): os.makedirs(FLAGS.keys_path) # Gen root CA, if we don't have one root_ca_path = os.path.join(FLAGS.ca_path, FLAGS.ca_file) if not os.path.exists(root_ca_path): genrootca_sh_path = os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir, 'CA', 'genrootca.sh') start = os.getcwd() if not os.path.exists(FLAGS.ca_path): os.makedirs(FLAGS.ca_path) os.chdir(FLAGS.ca_path) # TODO(vish): Do this with M2Crypto instead utils.runthis(_("Generating root CA: %s"), "sh", genrootca_sh_path) os.chdir(start) def _get_mpi_data(self, context, project_id): result = {} search_opts = {'project_id': project_id} for instance in self.compute_api.get_all(context, search_opts=search_opts): if instance['fixed_ips']: line = '%s slots=%d' % (instance['fixed_ips'][0]['address'], instance['vcpus']) key = str(instance['key_name']) if key in result: result[key].append(line) else: result[key] = [line] return result def _get_availability_zone_by_host(self, context, host): services = db.service_get_all_by_host(context.elevated(), host) if len(services) > 0: return services[0]['availability_zone'] return 'unknown zone' def _get_image_state(self, image): # NOTE(vish): fallback status if image_state isn't set state = image.get('status') if state == 'active': state = 'available' return image['properties'].get('image_state', state) def _format_instance_mapping(self, ctxt, instance_ref): root_device_name = instance_ref['root_device_name'] if root_device_name is None: return _DEFAULT_MAPPINGS mappings = {} mappings['ami'] = block_device.strip_dev(root_device_name) mappings['root'] = root_device_name default_local_device = instance_ref.get('default_local_device') if default_local_device: mappings['ephemeral0'] = default_local_device default_swap_device = instance_ref.get('default_swap_device') if default_swap_device: mappings['swap'] = default_swap_device ebs_devices = [] # 'ephemeralN', 'swap' and ebs for bdm in db.block_device_mapping_get_all_by_instance( ctxt, instance_ref['id']): if bdm['no_device']: continue # ebs volume case if (bdm['volume_id'] or bdm['snapshot_id']): ebs_devices.append(bdm['device_name']) continue virtual_name = bdm['virtual_name'] if not virtual_name: continue if block_device.is_swap_or_ephemeral(virtual_name): mappings[virtual_name] = bdm['device_name'] # NOTE(yamahata): I'm not sure how ebs device should be numbered. # Right now sort by device name for deterministic # result. if ebs_devices: nebs = 0 ebs_devices.sort() for ebs in ebs_devices: mappings['ebs%d' % nebs] = ebs nebs += 1 return mappings def get_metadata(self, address): ctxt = context.get_admin_context() search_opts = {'fixed_ip': address} try: instance_ref = self.compute_api.get_all(ctxt, search_opts=search_opts) except exception.NotFound: instance_ref = None if not instance_ref: return None # This ensures that all attributes of the instance # are populated. 
instance_ref = db.instance_get(ctxt, instance_ref[0]['id']) mpi = self._get_mpi_data(ctxt, instance_ref['project_id']) hostname = "%s.%s" % (instance_ref['hostname'], FLAGS.dhcp_domain) host = instance_ref['host'] availability_zone = self._get_availability_zone_by_host(ctxt, host) floating_ip = db.instance_get_floating_address(ctxt, instance_ref['id']) ec2_id = ec2utils.id_to_ec2_id(instance_ref['id']) image_ec2_id = self.image_ec2_id(instance_ref['image_ref']) security_groups = db.security_group_get_by_instance(ctxt, instance_ref['id']) security_groups = [x['name'] for x in security_groups] mappings = self._format_instance_mapping(ctxt, instance_ref) data = { 'user-data': self._format_user_data(instance_ref), 'meta-data': { 'ami-id': image_ec2_id, 'ami-launch-index': instance_ref['launch_index'], 'ami-manifest-path': 'FIXME', 'block-device-mapping': mappings, 'hostname': hostname, 'instance-action': 'none', 'instance-id': ec2_id, 'instance-type': instance_ref['instance_type']['name'], 'local-hostname': hostname, 'local-ipv4': address, 'placement': {'availability-zone': availability_zone}, 'public-hostname': hostname, 'public-ipv4': floating_ip or '', 'reservation-id': instance_ref['reservation_id'], 'security-groups': security_groups, 'mpi': mpi}} # public-keys should be in meta-data only if user specified one if instance_ref['key_name']: data['meta-data']['public-keys'] = { '0': {'_name': instance_ref['key_name'], 'openssh-key': instance_ref['key_data']}} for image_type in ['kernel', 'ramdisk']: if instance_ref.get('%s_id' % image_type): ec2_id = self.image_ec2_id(instance_ref['%s_id' % image_type], self._image_type(image_type)) data['meta-data']['%s-id' % image_type] = ec2_id if False: # TODO(vish): store ancestor ids data['ancestor-ami-ids'] = [] if False: # TODO(vish): store product codes data['product-codes'] = [] return data def describe_availability_zones(self, context, **kwargs): if ('zone_name' in kwargs and 'verbose' in kwargs['zone_name'] and context.is_admin): return self._describe_availability_zones_verbose(context, **kwargs) else: return self._describe_availability_zones(context, **kwargs) def _describe_availability_zones(self, context, **kwargs): ctxt = context.elevated() enabled_services = db.service_get_all(ctxt, False) disabled_services = db.service_get_all(ctxt, True) available_zones = [] for zone in [service.availability_zone for service in enabled_services]: if not zone in available_zones: available_zones.append(zone) not_available_zones = [] for zone in [service.availability_zone for service in disabled_services if not service['availability_zone'] in available_zones]: if not zone in not_available_zones: not_available_zones.append(zone) result = [] for zone in available_zones: result.append({'zoneName': zone, 'zoneState': "available"}) for zone in not_available_zones: result.append({'zoneName': zone, 'zoneState': "not available"}) return {'availabilityZoneInfo': result} def _describe_availability_zones_verbose(self, context, **kwargs): rv = {'availabilityZoneInfo': [{'zoneName': 'nova', 'zoneState': 'available'}]} services = db.service_get_all(context, False) now = utils.utcnow() hosts = [] for host in [service['host'] for service in services]: if not host in hosts: hosts.append(host) for host in hosts: rv['availabilityZoneInfo'].append({'zoneName': '|- %s' % host, 'zoneState': ''}) hsvcs = [service for service in services \ if service['host'] == host] for svc in hsvcs: delta = now - (svc['updated_at'] or svc['created_at']) alive = (delta.seconds <= 
FLAGS.service_down_time) art = (alive and ":-)") or "XXX" active = 'enabled' if svc['disabled']: active = 'disabled' rv['availabilityZoneInfo'].append({ 'zoneName': '| |- %s' % svc['binary'], 'zoneState': '%s %s %s' % (active, art, svc['updated_at'])}) return rv def describe_regions(self, context, region_name=None, **kwargs): if FLAGS.region_list: regions = [] for region in FLAGS.region_list: name, _sep, host = region.partition('=') endpoint = '%s://%s:%s%s' % (FLAGS.ec2_scheme, host, FLAGS.ec2_port, FLAGS.ec2_path) regions.append({'regionName': name, 'regionEndpoint': endpoint}) else: regions = [{'regionName': 'nova', 'regionEndpoint': '%s://%s:%s%s' % (FLAGS.ec2_scheme, FLAGS.ec2_host, FLAGS.ec2_port, FLAGS.ec2_path)}] return {'regionInfo': regions} def describe_snapshots(self, context, snapshot_id=None, owner=None, restorable_by=None, **kwargs): if snapshot_id: snapshots = [] for ec2_id in snapshot_id: internal_id = ec2utils.ec2_id_to_id(ec2_id) snapshot = self.volume_api.get_snapshot( context, snapshot_id=internal_id) snapshots.append(snapshot) else: snapshots = self.volume_api.get_all_snapshots(context) snapshots = [self._format_snapshot(context, s) for s in snapshots] return {'snapshotSet': snapshots} def _format_snapshot(self, context, snapshot): s = {} s['snapshotId'] = ec2utils.id_to_ec2_snap_id(snapshot['id']) s['volumeId'] = ec2utils.id_to_ec2_vol_id(snapshot['volume_id']) s['status'] = snapshot['status'] s['startTime'] = snapshot['created_at'] s['progress'] = snapshot['progress'] s['ownerId'] = snapshot['project_id'] s['volumeSize'] = snapshot['volume_size'] s['description'] = snapshot['display_description'] s['display_name'] = snapshot['display_name'] s['display_description'] = snapshot['display_description'] return s def create_snapshot(self, context, volume_id, **kwargs): LOG.audit(_("Create snapshot of volume %s"), volume_id, context=context) volume_id = ec2utils.ec2_id_to_id(volume_id) snapshot = self.volume_api.create_snapshot( context, volume_id=volume_id, name=kwargs.get('display_name'), description=kwargs.get('display_description')) return self._format_snapshot(context, snapshot) def delete_snapshot(self, context, snapshot_id, **kwargs): snapshot_id = ec2utils.ec2_id_to_id(snapshot_id) self.volume_api.delete_snapshot(context, snapshot_id=snapshot_id) return True def describe_key_pairs(self, context, key_name=None, **kwargs): key_pairs = db.key_pair_get_all_by_user(context, context.user_id) if not key_name is None: key_pairs = [x for x in key_pairs if x['name'] in key_name] result = [] for key_pair in key_pairs: # filter out the vpn keys suffix = FLAGS.vpn_key_suffix if context.is_admin or \ not key_pair['name'].endswith(suffix): result.append({ 'keyName': key_pair['name'], 'keyFingerprint': key_pair['fingerprint'], }) return {'keySet': result} def create_key_pair(self, context, key_name, **kwargs): LOG.audit(_("Create key pair %s"), key_name, context=context) data = _gen_key(context, context.user_id, key_name) return {'keyName': key_name, 'keyFingerprint': data['fingerprint'], 'keyMaterial': data['private_key']} # TODO(vish): when context is no longer an object, pass it here def import_public_key(self, context, key_name, public_key, fingerprint=None): LOG.audit(_("Import key %s"), key_name, context=context) key = {} key['user_id'] = context.user_id key['name'] = key_name key['public_key'] = public_key if fingerprint is None: tmpdir = tempfile.mkdtemp() pubfile = os.path.join(tmpdir, 'temp.pub') fh = open(pubfile, 'w') fh.write(public_key) fh.close() (out, err) = 
                    utils.execute('ssh-keygen', '-q', '-l', '-f',
                                  '%s' % (pubfile))
            fingerprint = out.split(' ')[1]
            shutil.rmtree(tmpdir)
        key['fingerprint'] = fingerprint
        db.key_pair_create(context, key)
        return True

    def delete_key_pair(self, context, key_name, **kwargs):
        LOG.audit(_("Delete key pair %s"), key_name, context=context)
        try:
            db.key_pair_destroy(context, context.user_id, key_name)
        except exception.NotFound:
            # aws returns true even if the key doesn't exist
            pass
        return True

    def describe_security_groups(self, context, group_name=None,
                                 group_id=None, **kwargs):
        self.compute_api.ensure_default_security_group(context)
        if group_name or group_id:
            groups = []
            if group_name:
                for name in group_name:
                    group = db.security_group_get_by_name(context,
                                                          context.project_id,
                                                          name)
                    groups.append(group)
            if group_id:
                for gid in group_id:
                    group = db.security_group_get(context, gid)
                    groups.append(group)
        elif context.is_admin:
            groups = db.security_group_get_all(context)
        else:
            groups = db.security_group_get_by_project(context,
                                                      context.project_id)
        groups = [self._format_security_group(context, g) for g in groups]

        return {'securityGroupInfo':
                list(sorted(groups,
                            key=lambda k: (k['ownerId'], k['groupName'])))}

    def _format_security_group(self, context, group):
        g = {}
        g['groupDescription'] = group.description
        g['groupName'] = group.name
        g['ownerId'] = group.project_id
        g['ipPermissions'] = []
        for rule in group.rules:
            r = {}
            r['groups'] = []
            r['ipRanges'] = []
            if rule.group_id:
                source_group = db.security_group_get(context, rule.group_id)
                r['groups'] += [{'groupName': source_group.name,
                                 'userId': source_group.project_id}]
                if rule.protocol:
                    r['ipProtocol'] = rule.protocol
                    r['fromPort'] = rule.from_port
                    r['toPort'] = rule.to_port
                    g['ipPermissions'] += [dict(r)]
                else:
                    for protocol, min_port, max_port in (('icmp', -1, -1),
                                                         ('tcp', 1, 65535),
                                                         ('udp', 1, 65535)):
                        r['ipProtocol'] = protocol
                        r['fromPort'] = min_port
                        r['toPort'] = max_port
                        g['ipPermissions'] += [dict(r)]
            else:
                r['ipProtocol'] = rule.protocol
                r['fromPort'] = rule.from_port
                r['toPort'] = rule.to_port
                r['ipRanges'] += [{'cidrIp': rule.cidr}]
                g['ipPermissions'] += [r]
        return g

    def _rule_args_to_dict(self, context, kwargs):
        rules = []
        if 'groups' not in kwargs and 'ip_ranges' not in kwargs:
            rule = self._rule_dict_last_step(context, **kwargs)
            if rule:
                rules.append(rule)
            return rules
        if 'ip_ranges' in kwargs:
            rules = self._cidr_args_split(kwargs)
        else:
            rules = [kwargs]
        finalset = []
        for rule in rules:
            if 'groups' in rule:
                groups_values = self._groups_args_split(rule)
                for groups_value in groups_values:
                    final = self._rule_dict_last_step(context, **groups_value)
                    finalset.append(final)
            else:
                final = self._rule_dict_last_step(context, **rule)
                finalset.append(final)
        return finalset

    def _cidr_args_split(self, kwargs):
        cidr_args_split = []
        cidrs = kwargs['ip_ranges']
        for key, cidr in cidrs.iteritems():
            mykwargs = kwargs.copy()
            del mykwargs['ip_ranges']
            mykwargs['cidr_ip'] = cidr['cidr_ip']
            cidr_args_split.append(mykwargs)
        return cidr_args_split

    def _groups_args_split(self, kwargs):
        groups_args_split = []
        groups = kwargs['groups']
        for key, group in groups.iteritems():
            mykwargs = kwargs.copy()
            del mykwargs['groups']
            if 'group_name' in group:
                mykwargs['source_security_group_name'] = group['group_name']
            if 'user_id' in group:
                mykwargs['source_security_group_owner_id'] = group['user_id']
            if 'group_id' in group:
                mykwargs['source_security_group_id'] = group['group_id']
            groups_args_split.append(mykwargs)
        return groups_args_split

    def _rule_dict_last_step(self, context, to_port=None, from_port=None,
                             ip_protocol=None, cidr_ip=None, user_id=None,
                             source_security_group_name=None,
                             source_security_group_owner_id=None):
        values = {}

        if source_security_group_name:
            source_project_id = self._get_source_project_id(
                    context, source_security_group_owner_id)

            source_security_group = \
                    db.security_group_get_by_name(context.elevated(),
                                                  source_project_id,
                                                  source_security_group_name)
            notfound = exception.SecurityGroupNotFound
            if not source_security_group:
                raise notfound(security_group_id=source_security_group_name)
            values['group_id'] = source_security_group['id']
        elif cidr_ip:
            # If this fails, it throws an exception. This is what we want.
            cidr_ip = urllib.unquote(cidr_ip).decode()
            if not utils.is_valid_cidr(cidr_ip):
                # Raise exception for invalid address
                raise exception.InvalidCidr(cidr=cidr_ip)
            values['cidr'] = cidr_ip
        else:
            values['cidr'] = '0.0.0.0/0'

        if ip_protocol and from_port and to_port:
            ip_protocol = str(ip_protocol)
            try:
                # Verify integer conversions
                from_port = int(from_port)
                to_port = int(to_port)
            except ValueError:
                if ip_protocol.upper() == 'ICMP':
                    raise exception.InvalidInput(reason="Type and"
                            " Code must be integers for ICMP protocol type")
                else:
                    raise exception.InvalidInput(reason="To and From ports "
                            "must be integers")

            if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']:
                raise exception.InvalidIpProtocol(protocol=ip_protocol)

            # Verify that from_port must always be less than
            # or equal to to_port
            if from_port > to_port:
                raise exception.InvalidPortRange(from_port=from_port,
                      to_port=to_port, msg="Former value cannot"
                                           " be greater than the latter")

            # Verify valid TCP, UDP port ranges
            if (ip_protocol.upper() in ['TCP', 'UDP'] and
                (from_port < 1 or to_port > 65535)):
                raise exception.InvalidPortRange(from_port=from_port,
                      to_port=to_port, msg="Valid TCP ports should"
                                           " be between 1-65535")

            # Verify ICMP type and code
            if (ip_protocol.upper() == "ICMP" and
                (from_port < -1 or to_port > 255)):
                raise exception.InvalidPortRange(from_port=from_port,
                      to_port=to_port, msg="For ICMP, the"
                                           " type:code must be valid")

            values['protocol'] = ip_protocol
            values['from_port'] = from_port
            values['to_port'] = to_port
        else:
            # If cidr based filtering, protocol and ports are mandatory
            if 'cidr' in values:
                return None

        return values

    def _security_group_rule_exists(self, security_group, values):
        """Return the id of a rule matching the specified values if one is
        already defined in the given security group, False otherwise.
""" for rule in security_group.rules: if 'group_id' in values: if rule['group_id'] == values['group_id']: return rule['id'] else: is_duplicate = True for key in ('cidr', 'from_port', 'to_port', 'protocol'): if rule[key] != values[key]: is_duplicate = False break if is_duplicate: return rule['id'] return False def revoke_security_group_ingress(self, context, group_name=None, group_id=None, **kwargs): if not group_name and not group_id: err = "Not enough parameters, need group_name or group_id" raise exception.ApiError(_(err)) self.compute_api.ensure_default_security_group(context) notfound = exception.SecurityGroupNotFound if group_name: security_group = db.security_group_get_by_name(context, context.project_id, group_name) if not security_group: raise notfound(security_group_id=group_name) if group_id: security_group = db.security_group_get(context, group_id) if not security_group: raise notfound(security_group_id=group_id) msg = "Revoke security group ingress %s" LOG.audit(_(msg), security_group['name'], context=context) prevalues = [] try: prevalues = kwargs['ip_permissions'] except KeyError: prevalues.append(kwargs) rule_id = None for values in prevalues: rulesvalues = self._rule_args_to_dict(context, values) if not rulesvalues: err = "%s Not enough parameters to build a valid rule" raise exception.ApiError(_(err % rulesvalues)) for values_for_rule in rulesvalues: values_for_rule['parent_group_id'] = security_group.id rule_id = self._security_group_rule_exists(security_group, values_for_rule) if rule_id: db.security_group_rule_destroy(context, rule_id) if rule_id: # NOTE(vish): we removed a rule, so refresh self.compute_api.trigger_security_group_rules_refresh( context, security_group_id=security_group['id']) return True raise exception.ApiError(_("No rule for the specified parameters.")) # TODO(soren): This has only been tested with Boto as the client. # Unfortunately, it seems Boto is using an old API # for these operations, so support for newer API versions # is sketchy. 
def authorize_security_group_ingress(self, context, group_name=None, group_id=None, **kwargs): if not group_name and not group_id: err = "Not enough parameters, need group_name or group_id" raise exception.ApiError(_(err)) self.compute_api.ensure_default_security_group(context) notfound = exception.SecurityGroupNotFound if group_name: security_group = db.security_group_get_by_name(context, context.project_id, group_name) if not security_group: raise notfound(security_group_id=group_name) if group_id: security_group = db.security_group_get(context, group_id) if not security_group: raise notfound(security_group_id=group_id) msg = "Authorize security group ingress %s" LOG.audit(_(msg), security_group['name'], context=context) prevalues = [] try: prevalues = kwargs['ip_permissions'] except KeyError: prevalues.append(kwargs) postvalues = [] for values in prevalues: rulesvalues = self._rule_args_to_dict(context, values) if not rulesvalues: err = "%s Not enough parameters to build a valid rule" raise exception.ApiError(_(err % rulesvalues)) for values_for_rule in rulesvalues: values_for_rule['parent_group_id'] = security_group.id if self._security_group_rule_exists(security_group, values_for_rule): err = '%s - This rule already exists in group' raise exception.ApiError(_(err) % values_for_rule) postvalues.append(values_for_rule) for values_for_rule in postvalues: security_group_rule = db.security_group_rule_create( context, values_for_rule) if postvalues: self.compute_api.trigger_security_group_rules_refresh( context, security_group_id=security_group['id']) return True raise exception.ApiError(_("No rule for the specified parameters.")) def _get_source_project_id(self, context, source_security_group_owner_id): if source_security_group_owner_id: # Parse user:project for source group. source_parts = source_security_group_owner_id.split(':') # If no project name specified, assume it's same as user name. # Since we're looking up by project name, the user name is not # used here. It's only read for EC2 API compatibility. if len(source_parts) == 2: source_project_id = source_parts[1] else: source_project_id = source_parts[0] else: source_project_id = context.project_id return source_project_id def create_security_group(self, context, group_name, group_description): if not re.match('^[a-zA-Z0-9_\- ]+$', str(group_name)): # Some validation to ensure that values match API spec. # - Alphanumeric characters, spaces, dashes, and underscores. # TODO(Daviey): LP: #813685 extend beyond group_name checking, and # probably create a param validator that can be used elsewhere. err = _("Value (%s) for parameter GroupName is invalid." " Content limited to Alphanumeric characters, " "spaces, dashes, and underscores.") % group_name # err not that of master ec2 implementation, as they fail to raise. raise exception.InvalidParameterValue(err=err) if len(str(group_name)) > 255: err = _("Value (%s) for parameter GroupName is invalid." 
" Length exceeds maximum of 255.") % group_name raise exception.InvalidParameterValue(err=err) LOG.audit(_("Create Security Group %s"), group_name, context=context) self.compute_api.ensure_default_security_group(context) if db.security_group_exists(context, context.project_id, group_name): raise exception.ApiError(_('group %s already exists') % group_name) group = {'user_id': context.user_id, 'project_id': context.project_id, 'name': group_name, 'description': group_description} group_ref = db.security_group_create(context, group) return {'securityGroupSet': [self._format_security_group(context, group_ref)]} def delete_security_group(self, context, group_name=None, group_id=None, **kwargs): if not group_name and not group_id: err = "Not enough parameters, need group_name or group_id" raise exception.ApiError(_(err)) notfound = exception.SecurityGroupNotFound if group_name: security_group = db.security_group_get_by_name(context, context.project_id, group_name) if not security_group: raise notfound(security_group_id=group_name) elif group_id: security_group = db.security_group_get(context, group_id) if not security_group: raise notfound(security_group_id=group_id) LOG.audit(_("Delete security group %s"), group_name, context=context) db.security_group_destroy(context, security_group.id) return True def get_console_output(self, context, instance_id, **kwargs): LOG.audit(_("Get console output for instance %s"), instance_id, context=context) # instance_id may be passed in as a list of instances if type(instance_id) == list: ec2_id = instance_id[0] else: ec2_id = instance_id instance_id = ec2utils.ec2_id_to_id(ec2_id) output = self.compute_api.get_console_output( context, instance_id=instance_id) now = utils.utcnow() return {"InstanceId": ec2_id, "Timestamp": now, "output": base64.b64encode(output)} def get_ajax_console(self, context, instance_id, **kwargs): ec2_id = instance_id[0] instance_id = ec2utils.ec2_id_to_id(ec2_id) return self.compute_api.get_ajax_console(context, instance_id=instance_id) def get_vnc_console(self, context, instance_id, **kwargs): """Returns vnc browser url. 
Used by OS dashboard.""" ec2_id = instance_id instance_id = ec2utils.ec2_id_to_id(ec2_id) return self.compute_api.get_vnc_console(context, instance_id=instance_id) def describe_volumes(self, context, volume_id=None, **kwargs): if volume_id: volumes = [] for ec2_id in volume_id: internal_id = ec2utils.ec2_id_to_id(ec2_id) volume = self.volume_api.get(context, volume_id=internal_id) volumes.append(volume) else: volumes = self.volume_api.get_all(context) volumes = [self._format_volume(context, v) for v in volumes] return {'volumeSet': volumes} def _format_volume(self, context, volume): instance_ec2_id = None instance_data = None if volume.get('instance', None): instance_id = volume['instance']['id'] instance_ec2_id = ec2utils.id_to_ec2_id(instance_id) instance_data = '%s[%s]' % (instance_ec2_id, volume['instance']['host']) v = {} v['volumeId'] = ec2utils.id_to_ec2_vol_id(volume['id']) v['status'] = volume['status'] v['size'] = volume['size'] v['availabilityZone'] = volume['availability_zone'] v['createTime'] = volume['created_at'] if context.is_admin: v['status'] = '%s (%s, %s, %s, %s)' % ( volume['status'], volume['project_id'], volume['host'], instance_data, volume['mountpoint']) if volume['attach_status'] == 'attached': v['attachmentSet'] = [{'attachTime': volume['attach_time'], 'deleteOnTermination': False, 'device': volume['mountpoint'], 'instanceId': instance_ec2_id, 'status': 'attached', 'volumeId': v['volumeId']}] else: v['attachmentSet'] = [{}] if volume.get('snapshot_id') != None: v['snapshotId'] = ec2utils.id_to_ec2_snap_id(volume['snapshot_id']) else: v['snapshotId'] = None v['display_name'] = volume['display_name'] v['display_description'] = volume['display_description'] return v def create_volume(self, context, **kwargs): size = kwargs.get('size') if kwargs.get('snapshot_id') != None: snapshot_id = ec2utils.ec2_id_to_id(kwargs['snapshot_id']) LOG.audit(_("Create volume from snapshot %s"), snapshot_id, context=context) else: snapshot_id = None LOG.audit(_("Create volume of %s GB"), size, context=context) volume = self.volume_api.create( context, size=size, snapshot_id=snapshot_id, name=kwargs.get('display_name'), description=kwargs.get('display_description')) # TODO(vish): Instance should be None at db layer instead of # trying to lazy load, but for now we turn it into # a dict to avoid an error. 
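        # NOTE(editor): illustrative sketch, not from the original source;
        # ids and values are hypothetical. The formatted response returned
        # below resembles one element of describe_volumes()'s 'volumeSet':
        #
        #     {'volumeId': 'vol-00000001',
        #      'status': 'creating',
        #      'size': 10,
        #      'availabilityZone': 'nova',
        #      'snapshotId': None,
        #      'attachmentSet': [{}]}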
return self._format_volume(context, dict(volume)) def delete_volume(self, context, volume_id, **kwargs): volume_id = ec2utils.ec2_id_to_id(volume_id) self.volume_api.delete(context, volume_id=volume_id) return True def update_volume(self, context, volume_id, **kwargs): volume_id = ec2utils.ec2_id_to_id(volume_id) updatable_fields = ['display_name', 'display_description'] changes = {} for field in updatable_fields: if field in kwargs: changes[field] = kwargs[field] if changes: self.volume_api.update(context, volume_id=volume_id, fields=changes) return True def attach_volume(self, context, volume_id, instance_id, device, **kwargs): volume_id = ec2utils.ec2_id_to_id(volume_id) instance_id = ec2utils.ec2_id_to_id(instance_id) msg = _("Attach volume %(volume_id)s to instance %(instance_id)s" " at %(device)s") % locals() LOG.audit(msg, context=context) self.compute_api.attach_volume(context, instance_id=instance_id, volume_id=volume_id, device=device) volume = self.volume_api.get(context, volume_id=volume_id) return {'attachTime': volume['attach_time'], 'device': volume['mountpoint'], 'instanceId': ec2utils.id_to_ec2_id(instance_id), 'requestId': context.request_id, 'status': volume['attach_status'], 'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)} def detach_volume(self, context, volume_id, **kwargs): volume_id = ec2utils.ec2_id_to_id(volume_id) LOG.audit(_("Detach volume %s"), volume_id, context=context) volume = self.volume_api.get(context, volume_id=volume_id) instance = self.compute_api.detach_volume(context, volume_id=volume_id) return {'attachTime': volume['attach_time'], 'device': volume['mountpoint'], 'instanceId': ec2utils.id_to_ec2_id(instance['id']), 'requestId': context.request_id, 'status': volume['attach_status'], 'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)} def _format_kernel_id(self, instance_ref, result, key): kernel_id = instance_ref['kernel_id'] if kernel_id is None: return result[key] = self.image_ec2_id(instance_ref['kernel_id'], 'aki') def _format_ramdisk_id(self, instance_ref, result, key): ramdisk_id = instance_ref['ramdisk_id'] if ramdisk_id is None: return result[key] = self.image_ec2_id(instance_ref['ramdisk_id'], 'ari') @staticmethod def _format_user_data(instance_ref): return base64.b64decode(instance_ref['user_data']) def describe_instance_attribute(self, context, instance_id, attribute, **kwargs): def _unsupported_attribute(instance, result): raise exception.ApiError(_('attribute not supported: %s') % attribute) def _format_attr_block_device_mapping(instance, result): tmp = {} self._format_instance_root_device_name(instance, tmp) self._format_instance_bdm(context, instance_id, tmp['rootDeviceName'], result) def _format_attr_disable_api_termination(instance, result): _unsupported_attribute(instance, result) def _format_attr_group_set(instance, result): CloudController._format_group_set(instance, result) def _format_attr_instance_initiated_shutdown_behavior(instance, result): vm_state = instance['vm_state'] state_to_value = { vm_states.STOPPED: 'stopped', vm_states.DELETED: 'terminated', } value = state_to_value.get(vm_state) if value: result['instanceInitiatedShutdownBehavior'] = value def _format_attr_instance_type(instance, result): self._format_instance_type(instance, result) def _format_attr_kernel(instance, result): self._format_kernel_id(instance, result, 'kernel') def _format_attr_ramdisk(instance, result): self._format_ramdisk_id(instance, result, 'ramdisk') def _format_attr_root_device_name(instance, result): 
self._format_instance_root_device_name(instance, result) def _format_attr_source_dest_check(instance, result): _unsupported_attribute(instance, result) def _format_attr_user_data(instance, result): result['userData'] = self._format_user_data(instance) attribute_formatter = { 'blockDeviceMapping': _format_attr_block_device_mapping, 'disableApiTermination': _format_attr_disable_api_termination, 'groupSet': _format_attr_group_set, 'instanceInitiatedShutdownBehavior': _format_attr_instance_initiated_shutdown_behavior, 'instanceType': _format_attr_instance_type, 'kernel': _format_attr_kernel, 'ramdisk': _format_attr_ramdisk, 'rootDeviceName': _format_attr_root_device_name, 'sourceDestCheck': _format_attr_source_dest_check, 'userData': _format_attr_user_data, } fn = attribute_formatter.get(attribute) if fn is None: raise exception.ApiError( _('attribute not supported: %s') % attribute) ec2_instance_id = instance_id instance_id = ec2utils.ec2_id_to_id(ec2_instance_id) instance = self.compute_api.get(context, instance_id) result = {'instance_id': ec2_instance_id} fn(instance, result) return result def describe_instances(self, context, **kwargs): # Optional DescribeInstances argument instance_id = kwargs.get('instance_id', None) return self._format_describe_instances(context, instance_id=instance_id) def describe_instances_v6(self, context, **kwargs): # Optional DescribeInstancesV6 argument instance_id = kwargs.get('instance_id', None) return self._format_describe_instances(context, instance_id=instance_id, use_v6=True) def _format_describe_instances(self, context, **kwargs): return {'reservationSet': self._format_instances(context, **kwargs)} def _format_run_instances(self, context, reservation_id): i = self._format_instances(context, reservation_id=reservation_id) assert len(i) == 1 return i[0] def _format_instance_bdm(self, context, instance_id, root_device_name, result): """Format InstanceBlockDeviceMappingResponseItemType""" root_device_type = 'instance-store' mapping = [] for bdm in db.block_device_mapping_get_all_by_instance(context, instance_id): volume_id = bdm['volume_id'] if (volume_id is None or bdm['no_device']): continue if (bdm['device_name'] == root_device_name and (bdm['snapshot_id'] or bdm['volume_id'])): assert not bdm['virtual_name'] root_device_type = 'ebs' vol = self.volume_api.get(context, volume_id=volume_id) LOG.debug(_("vol = %s\n"), vol) # TODO(yamahata): volume attach time ebs = {'volumeId': volume_id, 'deleteOnTermination': bdm['delete_on_termination'], 'attachTime': vol['attach_time'] or '-', 'status': vol['status'], } res = {'deviceName': bdm['device_name'], 'ebs': ebs, } mapping.append(res) if mapping: result['blockDeviceMapping'] = mapping result['rootDeviceType'] = root_device_type @staticmethod def _format_instance_root_device_name(instance, result): result['rootDeviceName'] = (instance.get('root_device_name') or _DEFAULT_ROOT_DEVICE_NAME) @staticmethod def _format_instance_type(instance, result): if instance['instance_type']: result['instanceType'] = instance['instance_type'].get('name') else: result['instanceType'] = None @staticmethod def _format_group_set(instance, result): security_group_names = [] if instance.get('security_groups'): for security_group in instance['security_groups']: security_group_names.append(security_group['name']) result['groupSet'] = utils.convert_to_list_dict( security_group_names, 'groupId') def _format_instances(self, context, instance_id=None, use_v6=False, **search_opts): # TODO(termie): this method is poorly named as its name does 
not imply # that it will be making a variety of database calls # rather than simply formatting a bunch of instances that # were handed to it reservations = {} # NOTE(vish): instance_id is an optional list of ids to filter by if instance_id: instances = [] for ec2_id in instance_id: internal_id = ec2utils.ec2_id_to_id(ec2_id) try: instance = self.compute_api.get(context, internal_id) except exception.NotFound: continue instances.append(instance) else: try: # always filter out deleted instances search_opts['deleted'] = False instances = self.compute_api.get_all(context, search_opts=search_opts) except exception.NotFound: instances = [] for instance in instances: if not context.is_admin: if instance['image_ref'] == str(FLAGS.vpn_image_id): continue i = {} instance_id = instance['id'] ec2_id = ec2utils.id_to_ec2_id(instance_id) i['instanceId'] = ec2_id i['imageId'] = self.image_ec2_id(instance['image_ref']) self._format_kernel_id(instance, i, 'kernelId') self._format_ramdisk_id(instance, i, 'ramdiskId') i['instanceState'] = { 'code': instance['power_state'], 'name': state_description_from_vm_state(instance['vm_state'])} fixed_addr = None floating_addr = None if instance['fixed_ips']: fixed = instance['fixed_ips'][0] fixed_addr = fixed['address'] if fixed['floating_ips']: floating_addr = fixed['floating_ips'][0]['address'] if fixed['network'] and use_v6: i['dnsNameV6'] = ipv6.to_global( fixed['network']['cidr_v6'], fixed['virtual_interface']['address'], instance['project_id']) i['privateDnsName'] = fixed_addr i['privateIpAddress'] = fixed_addr i['publicDnsName'] = floating_addr i['ipAddress'] = floating_addr or fixed_addr i['dnsName'] = i['publicDnsName'] or i['privateDnsName'] i['keyName'] = instance['key_name'] if context.is_admin: i['keyName'] = '%s (%s, %s)' % (i['keyName'], instance['project_id'], instance['host']) i['productCodesSet'] = utils.convert_to_list_dict([], 'product_codes') self._format_instance_type(instance, i) i['launchTime'] = instance['created_at'] i['amiLaunchIndex'] = instance['launch_index'] i['displayName'] = instance['display_name'] i['displayDescription'] = instance['display_description'] self._format_instance_root_device_name(instance, i) self._format_instance_bdm(context, instance_id, i['rootDeviceName'], i) host = instance['host'] zone = self._get_availability_zone_by_host(context, host) i['placement'] = {'availabilityZone': zone} if instance['reservation_id'] not in reservations: r = {} r['reservationId'] = instance['reservation_id'] r['ownerId'] = instance['project_id'] self._format_group_set(instance, r) r['instancesSet'] = [] reservations[instance['reservation_id']] = r reservations[instance['reservation_id']]['instancesSet'].append(i) return list(reservations.values()) def describe_addresses(self, context, **kwargs): return self.format_addresses(context) def format_addresses(self, context): addresses = [] if context.is_admin: iterator = db.floating_ip_get_all(context) else: iterator = db.floating_ip_get_all_by_project(context, context.project_id) for floating_ip_ref in iterator: if floating_ip_ref['project_id'] is None: continue address = floating_ip_ref['address'] ec2_id = None if (floating_ip_ref['fixed_ip'] and floating_ip_ref['fixed_ip']['instance']): instance_id = floating_ip_ref['fixed_ip']['instance']['id'] ec2_id = ec2utils.id_to_ec2_id(instance_id) address_rv = {'public_ip': address, 'instance_id': ec2_id} if context.is_admin: details = "%s (%s)" % (address_rv['instance_id'], floating_ip_ref['project_id']) address_rv['instance_id'] = details 
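            # NOTE(editor): sketch of one entry as assembled above, with
            # hypothetical values; for admins the instance id is annotated
            # with the owning project:
            #
            #     {'public_ip': '192.168.1.100',
            #      'instance_id': 'i-00000001 (someproject)'}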
addresses.append(address_rv) return {'addressesSet': addresses} def allocate_address(self, context, **kwargs): LOG.audit(_("Allocate address"), context=context) try: public_ip = self.network_api.allocate_floating_ip(context) return {'publicIp': public_ip} except rpc.RemoteError as ex: # NOTE(tr3buchet) - why does this block exist? if ex.exc_type == 'NoMoreFloatingIps': raise exception.NoMoreFloatingIps() else: raise def release_address(self, context, public_ip, **kwargs): LOG.audit(_("Release address %s"), public_ip, context=context) self.network_api.release_floating_ip(context, address=public_ip) return {'releaseResponse': ["Address released."]} def associate_address(self, context, instance_id, public_ip, **kwargs): LOG.audit(_("Associate address %(public_ip)s to" " instance %(instance_id)s") % locals(), context=context) instance_id = ec2utils.ec2_id_to_id(instance_id) self.compute_api.associate_floating_ip(context, instance_id=instance_id, address=public_ip) return {'associateResponse': ["Address associated."]} def disassociate_address(self, context, public_ip, **kwargs): LOG.audit(_("Disassociate address %s"), public_ip, context=context) self.network_api.disassociate_floating_ip(context, address=public_ip) return {'disassociateResponse': ["Address disassociated."]} def run_instances(self, context, **kwargs): max_count = int(kwargs.get('max_count', 1)) if kwargs.get('kernel_id'): kernel = self._get_image(context, kwargs['kernel_id']) kwargs['kernel_id'] = kernel['id'] if kwargs.get('ramdisk_id'): ramdisk = self._get_image(context, kwargs['ramdisk_id']) kwargs['ramdisk_id'] = ramdisk['id'] for bdm in kwargs.get('block_device_mapping', []): _parse_block_device_mapping(bdm) image = self._get_image(context, kwargs['image_id']) if image: image_state = self._get_image_state(image) else: raise exception.ImageNotFound(image_id=kwargs['image_id']) if image_state != 'available': raise exception.ApiError(_('Image must be available')) instances = self.compute_api.create(context, instance_type=instance_types.get_instance_type_by_name( kwargs.get('instance_type', None)), image_href=self._get_image(context, kwargs['image_id'])['id'], min_count=int(kwargs.get('min_count', max_count)), max_count=max_count, kernel_id=kwargs.get('kernel_id'), ramdisk_id=kwargs.get('ramdisk_id'), display_name=kwargs.get('display_name'), display_description=kwargs.get('display_description'), key_name=kwargs.get('key_name'), user_data=kwargs.get('user_data'), security_group=kwargs.get('security_group'), availability_zone=kwargs.get('placement', {}).get( 'availability_zone'), block_device_mapping=kwargs.get('block_device_mapping', {})) return self._format_run_instances(context, reservation_id=instances[0]['reservation_id']) def _do_instance(self, action, context, ec2_id): instance_id = ec2utils.ec2_id_to_id(ec2_id) action(context, instance_id=instance_id) def _do_instances(self, action, context, instance_id): for ec2_id in instance_id: self._do_instance(action, context, ec2_id) def terminate_instances(self, context, instance_id, **kwargs): """Terminate each instance in instance_id, which is a list of ec2 ids. 
instance_id is a kwarg so its name cannot be modified.""" LOG.debug(_("Going to start terminating instances")) self._do_instances(self.compute_api.delete, context, instance_id) return True def reboot_instances(self, context, instance_id, **kwargs): """instance_id is a list of instance ids""" LOG.audit(_("Reboot instance %r"), instance_id, context=context) self._do_instances(self.compute_api.reboot, context, instance_id) return True def stop_instances(self, context, instance_id, **kwargs): """Stop each instances in instance_id. Here instance_id is a list of instance ids""" LOG.debug(_("Going to stop instances")) self._do_instances(self.compute_api.stop, context, instance_id) return True def start_instances(self, context, instance_id, **kwargs): """Start each instances in instance_id. Here instance_id is a list of instance ids""" LOG.debug(_("Going to start instances")) self._do_instances(self.compute_api.start, context, instance_id) return True def rescue_instance(self, context, instance_id, **kwargs): """This is an extension to the normal ec2_api""" self._do_instance(self.compute_api.rescue, context, instance_id) return True def unrescue_instance(self, context, instance_id, **kwargs): """This is an extension to the normal ec2_api""" self._do_instance(self.compute_api.unrescue, context, instance_id) return True def update_instance(self, context, instance_id, **kwargs): updatable_fields = ['display_name', 'display_description'] changes = {} for field in updatable_fields: if field in kwargs: changes[field] = kwargs[field] if changes: instance_id = ec2utils.ec2_id_to_id(instance_id) self.compute_api.update(context, instance_id=instance_id, **changes) return True @staticmethod def _image_type(image_type): """Converts to a three letter image type. aki, kernel => aki ari, ramdisk => ari anything else => ami """ if image_type == 'kernel': return 'aki' if image_type == 'ramdisk': return 'ari' if image_type not in ['aki', 'ari']: return 'ami' return image_type @staticmethod def image_ec2_id(image_id, image_type='ami'): """Returns image ec2_id using id and three letter type.""" template = image_type + '-%08x' try: return ec2utils.id_to_ec2_id(int(image_id), template=template) except ValueError: #TODO(wwolf): once we have ec2_id -> glance_id mapping # in place, this wont be necessary return "ami-00000000" def _get_image(self, context, ec2_id): try: internal_id = ec2utils.ec2_id_to_id(ec2_id) image = self.image_service.show(context, internal_id) except (exception.InvalidEc2Id, exception.ImageNotFound): try: return self.image_service.show_by_name(context, ec2_id) except exception.NotFound: raise exception.ImageNotFound(image_id=ec2_id) image_type = ec2_id.split('-')[0] if self._image_type(image.get('container_format')) != image_type: raise exception.ImageNotFound(image_id=ec2_id) return image def _format_image(self, image): """Convert from format defined by BaseImageService to S3 format.""" i = {} image_type = self._image_type(image.get('container_format')) ec2_id = self.image_ec2_id(image.get('id'), image_type) name = image.get('name') i['imageId'] = ec2_id kernel_id = image['properties'].get('kernel_id') if kernel_id: i['kernelId'] = self.image_ec2_id(kernel_id, 'aki') ramdisk_id = image['properties'].get('ramdisk_id') if ramdisk_id: i['ramdiskId'] = self.image_ec2_id(ramdisk_id, 'ari') i['imageOwnerId'] = image['properties'].get('owner_id') if name: i['imageLocation'] = "%s (%s)" % (image['properties']. 
get('image_location'), name) else: i['imageLocation'] = image['properties'].get('image_location') i['imageState'] = self._get_image_state(image) i['displayName'] = name i['description'] = image.get('description') display_mapping = {'aki': 'kernel', 'ari': 'ramdisk', 'ami': 'machine'} i['imageType'] = display_mapping.get(image_type) i['isPublic'] = image.get('is_public') == True i['architecture'] = image['properties'].get('architecture') properties = image['properties'] root_device_name = block_device.properties_root_device_name(properties) root_device_type = 'instance-store' for bdm in properties.get('block_device_mapping', []): if (bdm.get('device_name') == root_device_name and ('snapshot_id' in bdm or 'volume_id' in bdm) and not bdm.get('no_device')): root_device_type = 'ebs' i['rootDeviceName'] = (root_device_name or _DEFAULT_ROOT_DEVICE_NAME) i['rootDeviceType'] = root_device_type _format_mappings(properties, i) return i def describe_images(self, context, image_id=None, **kwargs): # NOTE: image_id is a list! if image_id: images = [] for ec2_id in image_id: try: image = self._get_image(context, ec2_id) except exception.NotFound: raise exception.ImageNotFound(image_id=ec2_id) images.append(image) else: images = self.image_service.detail(context) images = [self._format_image(i) for i in images] return {'imagesSet': images} def deregister_image(self, context, image_id, **kwargs): LOG.audit(_("De-registering image %s"), image_id, context=context) image = self._get_image(context, image_id) internal_id = image['id'] self.image_service.delete(context, internal_id) return {'imageId': image_id} def _register_image(self, context, metadata): image = self.image_service.create(context, metadata) image_type = self._image_type(image.get('container_format')) image_id = self.image_ec2_id(image['id'], image_type) return image_id def register_image(self, context, image_location=None, **kwargs): if image_location is None and 'name' in kwargs: image_location = kwargs['name'] metadata = {'properties': {'image_location': image_location}} if 'root_device_name' in kwargs: metadata['properties']['root_device_name'] = \ kwargs.get('root_device_name') mappings = [_parse_block_device_mapping(bdm) for bdm in kwargs.get('block_device_mapping', [])] if mappings: metadata['properties']['block_device_mapping'] = mappings image_id = self._register_image(context, metadata) msg = _("Registered image %(image_location)s with" " id %(image_id)s") % locals() LOG.audit(msg, context=context) return {'imageId': image_id} def describe_image_attribute(self, context, image_id, attribute, **kwargs): def _block_device_mapping_attribute(image, result): _format_mappings(image['properties'], result) def _launch_permission_attribute(image, result): result['launchPermission'] = [] if image['is_public']: result['launchPermission'].append({'group': 'all'}) def _root_device_name_attribute(image, result): result['rootDeviceName'] = \ block_device.properties_root_device_name(image['properties']) if result['rootDeviceName'] is None: result['rootDeviceName'] = _DEFAULT_ROOT_DEVICE_NAME supported_attributes = { 'blockDeviceMapping': _block_device_mapping_attribute, 'launchPermission': _launch_permission_attribute, 'rootDeviceName': _root_device_name_attribute, } fn = supported_attributes.get(attribute) if fn is None: raise exception.ApiError(_('attribute not supported: %s') % attribute) try: image = self._get_image(context, image_id) except exception.NotFound: raise exception.ImageNotFound(image_id=image_id) result = {'imageId': image_id} 
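        # NOTE(editor): illustrative only, with a hypothetical image id.
        # For attribute='launchPermission' on a public image, the handler
        # invoked below fills the result in as:
        #
        #     {'imageId': 'ami-00000002',
        #      'launchPermission': [{'group': 'all'}]}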
        fn(image, result)
        return result

    def modify_image_attribute(self, context, image_id, attribute,
                               operation_type, **kwargs):
        # TODO(devcamcar): Support users and groups other than 'all'.
        if attribute != 'launchPermission':
            raise exception.ApiError(_('attribute not supported: %s')
                                     % attribute)
        if 'user_group' not in kwargs:
            raise exception.ApiError(_('user or group not specified'))
        if len(kwargs['user_group']) != 1 or kwargs['user_group'][0] != 'all':
            raise exception.ApiError(_('only group "all" is supported'))
        if operation_type not in ['add', 'remove']:
            raise exception.ApiError(_('operation_type must be add or remove'))
        LOG.audit(_("Updating publicity of image %s"), image_id,
                  context=context)
        try:
            image = self._get_image(context, image_id)
        except exception.NotFound:
            raise exception.ImageNotFound(image_id=image_id)
        internal_id = image['id']
        del image['id']

        image['is_public'] = (operation_type == 'add')
        return self.image_service.update(context, internal_id, image)

    def update_image(self, context, image_id, **kwargs):
        internal_id = ec2utils.ec2_id_to_id(image_id)
        result = self.image_service.update(context, internal_id,
                                           dict(kwargs))
        return result

    # TODO(yamahata): race condition
    # At the moment there is no way to prevent others from
    # manipulating instances/volumes/snapshots.
    # As other code doesn't take it into consideration, here we don't
    # care about it for now. Ostrich algorithm
    def create_image(self, context, instance_id, **kwargs):
        # NOTE(yamahata): name/description are ignored by register_image(),
        #                 do so here
        no_reboot = kwargs.get('no_reboot', False)

        ec2_instance_id = instance_id
        instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
        instance = self.compute_api.get(context, instance_id)

        # stop the instance if necessary
        restart_instance = False
        if not no_reboot:
            vm_state = instance['vm_state']

            # if the instance is in a subtle state, refuse to proceed.
            if vm_state not in (vm_states.ACTIVE, vm_states.STOPPED):
                raise exception.InstanceNotRunning(
                        instance_id=ec2_instance_id)

            if vm_state == vm_states.ACTIVE:
                restart_instance = True
                self.compute_api.stop(context, instance_id=instance_id)

            # wait until the instance is really stopped
            start_time = time.time()
            while vm_state != vm_states.STOPPED:
                time.sleep(1)
                instance = self.compute_api.get(context, instance_id)
                vm_state = instance['vm_state']
                # NOTE(yamahata): timeout and error. 1 hour for now for
                #                 safety. Is it too short/long?
                #                 Or is there any better way?
                timeout = 1 * 60 * 60
                if time.time() > start_time + timeout:
                    raise exception.ApiError(
                        _("Couldn't stop instance within %d sec") % timeout)

        src_image = self._get_image(context, instance['image_ref'])
        properties = src_image['properties']
        if instance['root_device_name']:
            properties['root_device_name'] = instance['root_device_name']

        mapping = []
        bdms = db.block_device_mapping_get_all_by_instance(context,
                                                           instance_id)
        for bdm in bdms:
            if bdm.no_device:
                continue
            m = {}
            for attr in ('device_name', 'snapshot_id', 'volume_id',
                         'volume_size', 'delete_on_termination', 'no_device',
                         'virtual_name'):
                val = getattr(bdm, attr)
                if val is not None:
                    m[attr] = val

            volume_id = m.get('volume_id')
            if m.get('snapshot_id') and volume_id:
                # create snapshot based on volume_id
                vol = self.volume_api.get(context, volume_id=volume_id)
                # NOTE(yamahata): Should we wait for snapshot creation?
                #                 Linux LVM snapshot creation completes in
                #                 short time, it doesn't matter for now.
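                # NOTE(editor): hedged sketch of the transformation done
                # here, with hypothetical ids. An entry such as
                #
                #     {'device_name': '/dev/vdb', 'snapshot_id': 10,
                #      'volume_id': 42, 'delete_on_termination': False}
                #
                # becomes, once the fresh snapshot below replaces the
                # volume reference,
                #
                #     {'device_name': '/dev/vdb', 'snapshot_id': 84,
                #      'delete_on_termination': False}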
snapshot = self.volume_api.create_snapshot_force( context, volume_id=volume_id, name=vol['display_name'], description=vol['display_description']) m['snapshot_id'] = snapshot['id'] del m['volume_id'] if m: mapping.append(m) for m in _properties_get_mappings(properties): virtual_name = m['virtual'] if virtual_name in ('ami', 'root'): continue assert block_device.is_swap_or_ephemeral(virtual_name) device_name = m['device'] if device_name in [b['device_name'] for b in mapping if not b.get('no_device', False)]: continue # NOTE(yamahata): swap and ephemeral devices are specified in # AMI, but disabled for this instance by user. # So disable those device by no_device. mapping.append({'device_name': device_name, 'no_device': True}) if mapping: properties['block_device_mapping'] = mapping for attr in ('status', 'location', 'id'): src_image.pop(attr, None) image_id = self._register_image(context, src_image) if restart_instance: self.compute_api.start(context, instance_id=instance_id) return {'imageId': image_id}
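# NOTE(editor): minimal usage sketch, not part of the original module. It
# assumes a configured nova deployment with an admin context; all names and
# ids below are hypothetical.
#
#     from nova import context as nova_context
#
#     ctxt = nova_context.get_admin_context()
#     cloud = CloudController()
#
#     # create a keypair, then boot one instance from an existing image
#     key = cloud.create_key_pair(ctxt, 'mykey')
#     reservation = cloud.run_instances(ctxt, image_id='ami-00000001',
#                                       key_name='mykey',
#                                       instance_type='m1.tiny')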
./CrossVul/dataset_final_sorted/CWE-264/py/bad_3633_0
crossvul-python_data_good_3695_1
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Main entry point into the Identity service.""" import uuid import urllib import urlparse from keystone import config from keystone import exception from keystone import policy from keystone import token from keystone.common import logging from keystone.common import manager from keystone.common import wsgi CONF = config.CONF LOG = logging.getLogger(__name__) class Manager(manager.Manager): """Default pivot point for the Identity backend. See :mod:`keystone.common.manager.Manager` for more details on how this dynamically calls the backend. """ def __init__(self): super(Manager, self).__init__(CONF.identity.driver) class Driver(object): """Interface description for an Identity driver.""" def authenticate(self, user_id=None, tenant_id=None, password=None): """Authenticate a given user, tenant and password. Returns: (user, tenant, metadata). """ raise exception.NotImplemented() def get_tenant(self, tenant_id): """Get a tenant by id. Returns: tenant_ref or None. """ raise exception.NotImplemented() def get_tenant_by_name(self, tenant_name): """Get a tenant by name. Returns: tenant_ref or None. """ raise exception.NotImplemented() def get_user(self, user_id): """Get a user by id. Returns: user_ref or None. """ raise exception.NotImplemented() def get_user_by_name(self, user_name): """Get a user by name. Returns: user_ref or None. """ raise exception.NotImplemented() def get_role(self, role_id): """Get a role by id. Returns: role_ref or None. """ raise exception.NotImplemented() def list_users(self): """List all users in the system. NOTE(termie): I'd prefer if this listed only the users for a given tenant. Returns: a list of user_refs or an empty list. """ raise exception.NotImplemented() def list_roles(self): """List all roles in the system. Returns: a list of role_refs or an empty list. """ raise exception.NotImplemented() # NOTE(termie): seven calls below should probably be exposed by the api # more clearly when the api redesign happens def add_user_to_tenant(self, tenant_id, user_id): raise exception.NotImplemented() def remove_user_from_tenant(self, tenant_id, user_id): raise exception.NotImplemented() def get_all_tenants(self): raise exception.NotImplemented() def get_tenants_for_user(self, user_id): """Get the tenants associated with a given user. Returns: a list of tenant ids. """ raise exception.NotImplemented() def get_roles_for_user_and_tenant(self, user_id, tenant_id): """Get the roles associated with a user within given tenant. Returns: a list of role ids. 
""" raise exception.NotImplemented() def add_role_to_user_and_tenant(self, user_id, tenant_id, role_id): """Add a role to a user within given tenant.""" raise exception.NotImplemented() def remove_role_from_user_and_tenant(self, user_id, tenant_id, role_id): """Remove a role from a user within given tenant.""" raise exception.NotImplemented() # user crud def create_user(self, user_id, user): raise exception.NotImplemented() def update_user(self, user_id, user): raise exception.NotImplemented() def delete_user(self, user_id): raise exception.NotImplemented() # tenant crud def create_tenant(self, tenant_id, tenant): raise exception.NotImplemented() def update_tenant(self, tenant_id, tenant): raise exception.NotImplemented() def delete_tenant(self, tenant_id, tenant): raise exception.NotImplemented() # metadata crud def get_metadata(self, user_id, tenant_id): raise exception.NotImplemented() def create_metadata(self, user_id, tenant_id, metadata): raise exception.NotImplemented() def update_metadata(self, user_id, tenant_id, metadata): raise exception.NotImplemented() def delete_metadata(self, user_id, tenant_id, metadata): raise exception.NotImplemented() # role crud def create_role(self, role_id, role): raise exception.NotImplemented() def update_role(self, role_id, role): raise exception.NotImplemented() def delete_role(self, role_id): raise exception.NotImplemented() class PublicRouter(wsgi.ComposableRouter): def add_routes(self, mapper): tenant_controller = TenantController() mapper.connect('/tenants', controller=tenant_controller, action='get_tenants_for_token', conditions=dict(methods=['GET'])) class AdminRouter(wsgi.ComposableRouter): def add_routes(self, mapper): # Tenant Operations tenant_controller = TenantController() mapper.connect('/tenants', controller=tenant_controller, action='get_all_tenants', conditions=dict(method=['GET'])) mapper.connect('/tenants/{tenant_id}', controller=tenant_controller, action='get_tenant', conditions=dict(method=['GET'])) # User Operations user_controller = UserController() mapper.connect('/users/{user_id}', controller=user_controller, action='get_user', conditions=dict(method=['GET'])) # Role Operations roles_controller = RoleController() mapper.connect('/tenants/{tenant_id}/users/{user_id}/roles', controller=roles_controller, action='get_user_roles', conditions=dict(method=['GET'])) mapper.connect('/users/{user_id}/roles', controller=user_controller, action='get_user_roles', conditions=dict(method=['GET'])) class TenantController(wsgi.Application): def __init__(self): self.identity_api = Manager() self.policy_api = policy.Manager() self.token_api = token.Manager() super(TenantController, self).__init__() def get_all_tenants(self, context, **kw): """Gets a list of all tenants for an admin user.""" self.assert_admin(context) tenant_refs = self.identity_api.get_tenants(context) params = { 'limit': context['query_string'].get('limit'), 'marker': context['query_string'].get('marker'), } return self._format_tenant_list(tenant_refs, **params) def get_tenants_for_token(self, context, **kw): """Get valid tenants for token based on token used to authenticate. Pulls the token from the context, validates it and gets the valid tenants for the user in the token. Doesn't care about token scopedness. 
""" try: token_ref = self.token_api.get_token(context=context, token_id=context['token_id']) except exception.NotFound: raise exception.Unauthorized() user_ref = token_ref['user'] tenant_ids = self.identity_api.get_tenants_for_user( context, user_ref['id']) tenant_refs = [] for tenant_id in tenant_ids: tenant_refs.append(self.identity_api.get_tenant( context=context, tenant_id=tenant_id)) params = { 'limit': context['query_string'].get('limit'), 'marker': context['query_string'].get('marker'), } return self._format_tenant_list(tenant_refs, **params) def get_tenant(self, context, tenant_id): # TODO(termie): this stuff should probably be moved to middleware self.assert_admin(context) tenant = self.identity_api.get_tenant(context, tenant_id) if tenant is None: raise exception.TenantNotFound(tenant_id=tenant_id) return {'tenant': tenant} # CRUD Extension def create_tenant(self, context, tenant): tenant_ref = self._normalize_dict(tenant) self.assert_admin(context) tenant_id = (tenant_ref.get('id') and tenant_ref.get('id') or uuid.uuid4().hex) tenant_ref['id'] = tenant_id tenant = self.identity_api.create_tenant( context, tenant_id, tenant_ref) return {'tenant': tenant} def update_tenant(self, context, tenant_id, tenant): self.assert_admin(context) if self.identity_api.get_tenant(context, tenant_id) is None: raise exception.TenantNotFound(tenant_id=tenant_id) tenant_ref = self.identity_api.update_tenant( context, tenant_id, tenant) return {'tenant': tenant_ref} def delete_tenant(self, context, tenant_id, **kw): self.assert_admin(context) if self.identity_api.get_tenant(context, tenant_id) is None: raise exception.TenantNotFound(tenant_id=tenant_id) self.identity_api.delete_tenant(context, tenant_id) def get_tenant_users(self, context, tenant_id, **kw): self.assert_admin(context) if self.identity_api.get_tenant(context, tenant_id) is None: raise exception.TenantNotFound(tenant_id=tenant_id) user_refs = self.identity_api.get_tenant_users(context, tenant_id) return {'users': user_refs} def _format_tenant_list(self, tenant_refs, **kwargs): marker = kwargs.get('marker') page_idx = 0 if marker is not None: for (marker_idx, tenant) in enumerate(tenant_refs): if tenant['id'] == marker: # we start pagination after the marker page_idx = marker_idx + 1 break else: msg = 'Marker could not be found' raise exception.ValidationError(message=msg) limit = kwargs.get('limit') if limit is not None: try: limit = int(limit) if limit < 0: raise AssertionError() except (ValueError, AssertionError): msg = 'Invalid limit value' raise exception.ValidationError(message=msg) tenant_refs = tenant_refs[page_idx:limit] for x in tenant_refs: if 'enabled' not in x: x['enabled'] = True o = {'tenants': tenant_refs, 'tenants_links': []} return o class UserController(wsgi.Application): def __init__(self): self.identity_api = Manager() self.policy_api = policy.Manager() self.token_api = token.Manager() super(UserController, self).__init__() def get_user(self, context, user_id): self.assert_admin(context) user_ref = self.identity_api.get_user(context, user_id) if not user_ref: raise exception.UserNotFound(user_id=user_id) return {'user': user_ref} def get_users(self, context): # NOTE(termie): i can't imagine that this really wants all the data # about every single user in the system... 
self.assert_admin(context) user_refs = self.identity_api.list_users(context) return {'users': user_refs} # CRUD extension def create_user(self, context, user): user = self._normalize_dict(user) self.assert_admin(context) tenant_id = user.get('tenantId', None) if (tenant_id is not None and self.identity_api.get_tenant(context, tenant_id) is None): raise exception.TenantNotFound(tenant_id=tenant_id) user_id = uuid.uuid4().hex user_ref = user.copy() user_ref['id'] = user_id new_user_ref = self.identity_api.create_user( context, user_id, user_ref) if tenant_id: self.identity_api.add_user_to_tenant(context, tenant_id, user_id) return {'user': new_user_ref} def update_user(self, context, user_id, user): # NOTE(termie): this is really more of a patch than a put self.assert_admin(context) if self.identity_api.get_user(context, user_id) is None: raise exception.UserNotFound(user_id=user_id) user_ref = self.identity_api.update_user(context, user_id, user) return {'user': user_ref} def delete_user(self, context, user_id): self.assert_admin(context) if self.identity_api.get_user(context, user_id) is None: raise exception.UserNotFound(user_id=user_id) self.identity_api.delete_user(context, user_id) def set_user_enabled(self, context, user_id, user): return self.update_user(context, user_id, user) def set_user_password(self, context, user_id, user): user_ref = self.update_user(context, user_id, user) try: for token_id in self.token_api.list_tokens(context, user_id): self.token_api.delete_token(context, token_id) except exception.NotImplemented: # The password has been changed but tokens remain valid for # backends that can't list tokens for users LOG.warning('Password changed for %s, but existing tokens remain ' 'valid' % user_id) return user_ref def update_user_tenant(self, context, user_id, user): """Update the default tenant.""" # ensure that we're a member of that tenant tenant_id = user.get('tenantId') self.identity_api.add_user_to_tenant(context, tenant_id, user_id) return self.update_user(context, user_id, user) class RoleController(wsgi.Application): def __init__(self): self.identity_api = Manager() self.token_api = token.Manager() self.policy_api = policy.Manager() super(RoleController, self).__init__() # COMPAT(essex-3) def get_user_roles(self, context, user_id, tenant_id=None): """Get the roles for a user and tenant pair. Since we're trying to ignore the idea of user-only roles we're not implementing them in hopes that the idea will die off. 
""" if tenant_id is None: raise exception.NotImplemented(message='User roles not supported: ' 'tenant ID required') user = self.identity_api.get_user(context, user_id) if user is None: raise exception.UserNotFound(user_id=user_id) tenant = self.identity_api.get_tenant(context, tenant_id) if tenant is None: raise exception.TenantNotFound(tenant_id=tenant_id) roles = self.identity_api.get_roles_for_user_and_tenant( context, user_id, tenant_id) return {'roles': [self.identity_api.get_role(context, x) for x in roles]} # CRUD extension def get_role(self, context, role_id): self.assert_admin(context) role_ref = self.identity_api.get_role(context, role_id) if not role_ref: raise exception.RoleNotFound(role_id=role_id) return {'role': role_ref} def create_role(self, context, role): role = self._normalize_dict(role) self.assert_admin(context) role_id = uuid.uuid4().hex role['id'] = role_id role_ref = self.identity_api.create_role(context, role_id, role) return {'role': role_ref} def delete_role(self, context, role_id): self.assert_admin(context) self.get_role(context, role_id) self.identity_api.delete_role(context, role_id) def get_roles(self, context): self.assert_admin(context) roles = self.identity_api.list_roles(context) # TODO(termie): probably inefficient at some point return {'roles': roles} def add_role_to_user(self, context, user_id, role_id, tenant_id=None): """Add a role to a user and tenant pair. Since we're trying to ignore the idea of user-only roles we're not implementing them in hopes that the idea will die off. """ self.assert_admin(context) if tenant_id is None: raise exception.NotImplemented(message='User roles not supported: ' 'tenant_id required') if self.identity_api.get_user(context, user_id) is None: raise exception.UserNotFound(user_id=user_id) if self.identity_api.get_tenant(context, tenant_id) is None: raise exception.TenantNotFound(tenant_id=tenant_id) if self.identity_api.get_role(context, role_id) is None: raise exception.RoleNotFound(role_id=role_id) # This still has the weird legacy semantics that adding a role to # a user also adds them to a tenant self.identity_api.add_user_to_tenant(context, tenant_id, user_id) self.identity_api.add_role_to_user_and_tenant( context, user_id, tenant_id, role_id) role_ref = self.identity_api.get_role(context, role_id) return {'role': role_ref} def remove_role_from_user(self, context, user_id, role_id, tenant_id=None): """Remove a role from a user and tenant pair. Since we're trying to ignore the idea of user-only roles we're not implementing them in hopes that the idea will die off. 
""" self.assert_admin(context) if tenant_id is None: raise exception.NotImplemented(message='User roles not supported: ' 'tenant_id required') if self.identity_api.get_user(context, user_id) is None: raise exception.UserNotFound(user_id=user_id) if self.identity_api.get_tenant(context, tenant_id) is None: raise exception.TenantNotFound(tenant_id=tenant_id) if self.identity_api.get_role(context, role_id) is None: raise exception.RoleNotFound(role_id=role_id) # This still has the weird legacy semantics that adding a role to # a user also adds them to a tenant, so we must follow up on that self.identity_api.remove_role_from_user_and_tenant( context, user_id, tenant_id, role_id) roles = self.identity_api.get_roles_for_user_and_tenant( context, user_id, tenant_id) if not roles: self.identity_api.remove_user_from_tenant( context, tenant_id, user_id) return # COMPAT(diablo): CRUD extension def get_role_refs(self, context, user_id): """Ultimate hack to get around having to make role_refs first-class. This will basically iterate over the various roles the user has in all tenants the user is a member of and create fake role_refs where the id encodes the user-tenant-role information so we can look up the appropriate data when we need to delete them. """ self.assert_admin(context) user_ref = self.identity_api.get_user(context, user_id) tenant_ids = self.identity_api.get_tenants_for_user(context, user_id) o = [] for tenant_id in tenant_ids: role_ids = self.identity_api.get_roles_for_user_and_tenant( context, user_id, tenant_id) for role_id in role_ids: ref = {'roleId': role_id, 'tenantId': tenant_id, 'userId': user_id} ref['id'] = urllib.urlencode(ref) o.append(ref) return {'roles': o} # COMPAT(diablo): CRUD extension def create_role_ref(self, context, user_id, role): """This is actually used for adding a user to a tenant. In the legacy data model adding a user to a tenant required setting a role. """ self.assert_admin(context) # TODO(termie): for now we're ignoring the actual role tenant_id = role.get('tenantId') role_id = role.get('roleId') self.identity_api.add_user_to_tenant(context, tenant_id, user_id) self.identity_api.add_role_to_user_and_tenant( context, user_id, tenant_id, role_id) role_ref = self.identity_api.get_role(context, role_id) return {'role': role_ref} # COMPAT(diablo): CRUD extension def delete_role_ref(self, context, user_id, role_ref_id): """This is actually used for deleting a user from a tenant. In the legacy data model removing a user from a tenant required deleting a role. To emulate this, we encode the tenant and role in the role_ref_id, and if this happens to be the last role for the user-tenant pair, we remove the user from the tenant. """ self.assert_admin(context) # TODO(termie): for now we're ignoring the actual role role_ref_ref = urlparse.parse_qs(role_ref_id) tenant_id = role_ref_ref.get('tenantId')[0] role_id = role_ref_ref.get('roleId')[0] self.identity_api.remove_role_from_user_and_tenant( context, user_id, tenant_id, role_id) roles = self.identity_api.get_roles_for_user_and_tenant( context, user_id, tenant_id) if not roles: self.identity_api.remove_user_from_tenant( context, tenant_id, user_id)
./CrossVul/dataset_final_sorted/CWE-264/py/good_3695_1
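# A minimal, self-contained sketch (not from the dataset) of the fake
# role_ref id scheme used by the Diablo-compat CRUD extension in the file
# above: get_role_refs() URL-encodes the (userId, tenantId, roleId) triple
# into the ref id, and delete_role_ref() recovers it with
# urlparse.parse_qs. Python 2 stdlib only, matching the file's imports;
# the sample ids are made up for illustration.
import urllib
import urlparse

ref = {'roleId': 'role-1', 'tenantId': 'tenant-1', 'userId': 'user-1'}
role_ref_id = urllib.urlencode(ref)
# e.g. 'roleId=role-1&tenantId=tenant-1&userId=user-1' (key order may vary)

decoded = urlparse.parse_qs(role_ref_id)
# parse_qs maps each key to a *list* of values, hence the [0] indexing,
# exactly as delete_role_ref() does above.
assert decoded['tenantId'][0] == 'tenant-1'
assert decoded['roleId'][0] == 'role-1'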
crossvul-python_data_bad_3695_2
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy
import datetime

from keystone.common import kvs
from keystone import exception
from keystone import token


class Token(kvs.Base, token.Driver):
    # Public interface
    def get_token(self, token_id):
        # NOTE: named token_ref to avoid shadowing the keystone.token
        # module imported above.
        token_ref = self.db.get('token-%s' % token_id)
        # Serve the token only while it has no expiry or the expiry is
        # still in the future; otherwise treat it as not found.
        if (token_ref and (token_ref['expires'] is None
                           or token_ref['expires'] >
                           datetime.datetime.utcnow())):
            return token_ref
        else:
            raise exception.TokenNotFound(token_id=token_id)

    def create_token(self, token_id, data):
        # Work on a copy so the caller's dict is never mutated.
        data_copy = copy.deepcopy(data)
        if 'expires' not in data:
            data_copy['expires'] = self._get_default_expire_time()
        self.db.set('token-%s' % token_id, data_copy)
        return copy.deepcopy(data_copy)

    def delete_token(self, token_id):
        try:
            return self.db.delete('token-%s' % token_id)
        except KeyError:
            raise exception.TokenNotFound(token_id=token_id)
./CrossVul/dataset_final_sorted/CWE-264/py/bad_3695_2
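# A minimal sketch (not from the dataset) of the expiry rule enforced by
# Token.get_token in the kvs backend above: a stored token is served only
# while 'expires' is None (never expires) or still in the future;
# otherwise it is treated as not found. Note that this bad_3695_2 backend
# exposes no list_tokens(); the good_3695_4 Driver interface later in
# this dump adds one so a user's tokens can be enumerated and revoked.
# The InMemoryDb class below is a hypothetical stand-in, not a keystone
# API.
import datetime


class InMemoryDb(object):
    def __init__(self):
        self._data = {}

    def set(self, key, value):
        self._data[key] = value

    def get(self, key):
        return self._data.get(key)


db = InMemoryDb()
now = datetime.datetime.utcnow()
db.set('token-live', {'expires': now + datetime.timedelta(hours=1)})
db.set('token-dead', {'expires': now - datetime.timedelta(hours=1)})
db.set('token-forever', {'expires': None})


def is_valid(token):
    # Mirrors the predicate inside get_token() above.
    return bool(token and (token['expires'] is None or
                           token['expires'] > datetime.datetime.utcnow()))

assert is_valid(db.get('token-live'))
assert is_valid(db.get('token-forever'))
assert not is_valid(db.get('token-dead'))
assert not is_valid(db.get('token-missing'))  # absent -> None -> invalid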
crossvul-python_data_bad_4833_1
#!/usr/bin/env python2 __license__ = 'GPL v3' __copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net' __docformat__ = 'restructuredtext en' # Imports {{{ import os, math, json from base64 import b64encode from functools import partial from future_builtins import map from PyQt5.Qt import ( QSize, QSizePolicy, QUrl, Qt, pyqtProperty, QPainter, QPalette, QBrush, QDialog, QColor, QPoint, QImage, QRegion, QIcon, QAction, QMenu, pyqtSignal, QApplication, pyqtSlot, QKeySequence, QMimeData) from PyQt5.QtWebKitWidgets import QWebPage, QWebView from PyQt5.QtWebKit import QWebSettings, QWebElement from calibre.gui2.viewer.flip import SlideFlip from calibre.gui2.shortcuts import Shortcuts from calibre.gui2 import open_url from calibre import prints from calibre.customize.ui import all_viewer_plugins from calibre.gui2.viewer.keys import SHORTCUTS from calibre.gui2.viewer.javascript import JavaScriptLoader from calibre.gui2.viewer.position import PagePosition from calibre.gui2.viewer.config import config, ConfigDialog, load_themes from calibre.gui2.viewer.image_popup import ImagePopup from calibre.gui2.viewer.table_popup import TablePopup from calibre.gui2.viewer.inspector import WebInspector from calibre.gui2.viewer.gestures import GestureHandler from calibre.gui2.viewer.footnote import Footnotes from calibre.ebooks.oeb.display.webview import load_html from calibre.constants import isxp, iswindows, DEBUG, __version__ # }}} def apply_settings(settings, opts): settings.setFontSize(QWebSettings.DefaultFontSize, opts.default_font_size) settings.setFontSize(QWebSettings.DefaultFixedFontSize, opts.mono_font_size) settings.setFontSize(QWebSettings.MinimumLogicalFontSize, opts.minimum_font_size) settings.setFontSize(QWebSettings.MinimumFontSize, opts.minimum_font_size) settings.setFontFamily(QWebSettings.StandardFont, {'serif':opts.serif_family, 'sans':opts.sans_family, 'mono':opts.mono_family}[opts.standard_font]) settings.setFontFamily(QWebSettings.SerifFont, opts.serif_family) settings.setFontFamily(QWebSettings.SansSerifFont, opts.sans_family) settings.setFontFamily(QWebSettings.FixedFont, opts.mono_family) settings.setAttribute(QWebSettings.ZoomTextOnly, True) def apply_basic_settings(settings): # Security settings.setAttribute(QWebSettings.JavaEnabled, False) settings.setAttribute(QWebSettings.PluginsEnabled, False) settings.setAttribute(QWebSettings.JavascriptCanOpenWindows, False) settings.setAttribute(QWebSettings.JavascriptCanAccessClipboard, False) # PrivateBrowsing disables console messages # settings.setAttribute(QWebSettings.PrivateBrowsingEnabled, True) settings.setAttribute(QWebSettings.NotificationsEnabled, False) settings.setThirdPartyCookiePolicy(QWebSettings.AlwaysBlockThirdPartyCookies) # Miscellaneous settings.setAttribute(QWebSettings.LinksIncludedInFocusChain, True) settings.setAttribute(QWebSettings.DeveloperExtrasEnabled, True) class Document(QWebPage): # {{{ page_turn = pyqtSignal(object) mark_element = pyqtSignal(QWebElement) settings_changed = pyqtSignal() animated_scroll_done_signal = pyqtSignal() def set_font_settings(self, opts): settings = self.settings() apply_settings(settings, opts) def do_config(self, parent=None): d = ConfigDialog(self.shortcuts, parent) if d.exec_() == QDialog.Accepted: opts = config().parse() self.apply_settings(opts) def apply_settings(self, opts): with self.page_position: self.set_font_settings(opts) self.set_user_stylesheet(opts) self.misc_config(opts) self.settings_changed.emit() self.after_load() def __init__(self, shortcuts, parent=None, 
debug_javascript=False): QWebPage.__init__(self, parent) self.setObjectName("py_bridge") self.in_paged_mode = False # Use this to pass arbitrary JSON encodable objects between python and # javascript. In python get/set the value as: self.bridge_value. In # javascript, get/set the value as: py_bridge.value self.bridge_value = None self.first_load = True self.jump_to_cfi_listeners = set() self.debug_javascript = debug_javascript self.anchor_positions = {} self.index_anchors = set() self.current_language = None self.loaded_javascript = False self.js_loader = JavaScriptLoader( dynamic_coffeescript=self.debug_javascript) self.in_fullscreen_mode = False self.math_present = False self.setLinkDelegationPolicy(self.DelegateAllLinks) self.scroll_marks = [] self.shortcuts = shortcuts pal = self.palette() pal.setBrush(QPalette.Background, QColor(0xee, 0xee, 0xee)) self.setPalette(pal) self.page_position = PagePosition(self) settings = self.settings() # Fonts self.all_viewer_plugins = tuple(all_viewer_plugins()) for pl in self.all_viewer_plugins: pl.load_fonts() opts = config().parse() self.set_font_settings(opts) apply_basic_settings(settings) self.set_user_stylesheet(opts) self.misc_config(opts) # Load javascript self.mainFrame().javaScriptWindowObjectCleared.connect( self.add_window_objects) self.turn_off_internal_scrollbars() def turn_off_internal_scrollbars(self): mf = self.mainFrame() mf.setScrollBarPolicy(Qt.Vertical, Qt.ScrollBarAlwaysOff) mf.setScrollBarPolicy(Qt.Horizontal, Qt.ScrollBarAlwaysOff) def set_user_stylesheet(self, opts): brules = ['background-color: %s !important'%opts.background_color] if opts.background_color else ['background-color: white'] prefix = ''' body { %s } '''%('; '.join(brules)) if opts.text_color: prefix += '\n\nbody, p, div { color: %s !important }'%opts.text_color raw = prefix + opts.user_css raw = '::selection {background:#ffff00; color:#000;}\n'+raw data = 'data:text/css;charset=utf-8;base64,' data += b64encode(raw.encode('utf-8')) self.settings().setUserStyleSheetUrl(QUrl(data)) def findText(self, q, flags): if self.hyphenatable: q = unicode(q) hyphenated_q = self.javascript( 'hyphenate_text(%s, "%s")' % (json.dumps(q, ensure_ascii=False), self.loaded_lang), typ='string') if hyphenated_q and QWebPage.findText(self, hyphenated_q, flags): return True return QWebPage.findText(self, q, flags) def misc_config(self, opts): self.hyphenate = opts.hyphenate self.hyphenate_default_lang = opts.hyphenate_default_lang self.do_fit_images = opts.fit_images self.page_flip_duration = opts.page_flip_duration self.enable_page_flip = self.page_flip_duration > 0.1 self.font_magnification_step = opts.font_magnification_step self.wheel_flips_pages = opts.wheel_flips_pages self.wheel_scroll_fraction = opts.wheel_scroll_fraction self.line_scroll_fraction = opts.line_scroll_fraction self.tap_flips_pages = opts.tap_flips_pages self.line_scrolling_stops_on_pagebreaks = opts.line_scrolling_stops_on_pagebreaks screen_width = QApplication.desktop().screenGeometry().width() # Leave some space for the scrollbar and some border self.max_fs_width = min(opts.max_fs_width, screen_width-50) self.max_fs_height = opts.max_fs_height self.fullscreen_clock = opts.fullscreen_clock self.fullscreen_scrollbar = opts.fullscreen_scrollbar self.fullscreen_pos = opts.fullscreen_pos self.start_in_fullscreen = opts.start_in_fullscreen self.show_fullscreen_help = opts.show_fullscreen_help self.use_book_margins = opts.use_book_margins self.cols_per_screen_portrait = opts.cols_per_screen_portrait 
self.cols_per_screen_landscape = opts.cols_per_screen_landscape self.side_margin = opts.side_margin self.top_margin, self.bottom_margin = opts.top_margin, opts.bottom_margin self.show_controls = opts.show_controls self.remember_current_page = opts.remember_current_page self.copy_bookmarks_to_file = opts.copy_bookmarks_to_file self.search_online_url = opts.search_online_url or 'https://www.google.com/search?q={text}' def fit_images(self): if self.do_fit_images and not self.in_paged_mode: self.javascript('setup_image_scaling_handlers()') def add_window_objects(self): self.mainFrame().addToJavaScriptWindowObject("py_bridge", self) self.javascript(''' Object.defineProperty(py_bridge, 'value', { get : function() { return JSON.parse(this._pass_json_value); }, set : function(val) { this._pass_json_value = JSON.stringify(val); } }); ''') self.loaded_javascript = False def load_javascript_libraries(self): if self.loaded_javascript: return self.loaded_javascript = True evaljs = self.mainFrame().evaluateJavaScript self.loaded_lang = self.js_loader(evaljs, self.current_language, self.hyphenate_default_lang) evaljs('window.calibre_utils.setup_epub_reading_system(%s, %s, %s, %s)' % tuple(map(json.dumps, ( 'calibre-desktop', __version__, 'paginated' if self.in_paged_mode else 'scrolling', 'dom-manipulation layout-changes mouse-events keyboard-events'.split())))) mjpath = P(u'viewer/mathjax').replace(os.sep, '/') if iswindows: mjpath = u'/' + mjpath self.javascript(u'window.mathjax.base = %s'%(json.dumps(mjpath, ensure_ascii=False))) for pl in self.all_viewer_plugins: pl.load_javascript(evaljs) evaljs('py_bridge.mark_element.connect(window.calibre_extract.mark)') @pyqtSlot() def animated_scroll_done(self): self.animated_scroll_done_signal.emit() @property def hyphenatable(self): # Qt fails to render soft hyphens correctly on windows xp return not isxp and self.hyphenate and getattr(self, 'loaded_lang', '') and not self.math_present @pyqtSlot() def init_hyphenate(self): if self.hyphenatable: self.javascript('do_hyphenation("%s")'%self.loaded_lang) @pyqtSlot(int) def page_turn_requested(self, backwards): self.page_turn.emit(bool(backwards)) def _pass_json_value_getter(self): val = json.dumps(self.bridge_value) return val def _pass_json_value_setter(self, value): self.bridge_value = json.loads(unicode(value)) _pass_json_value = pyqtProperty(str, fget=_pass_json_value_getter, fset=_pass_json_value_setter) def after_load(self, last_loaded_path=None): self.javascript('window.paged_display.read_document_margins()') self.set_bottom_padding(0) self.fit_images() w = 1 if iswindows else 0 self.math_present = self.javascript('window.mathjax.check_for_math(%d)' % w, bool) self.init_hyphenate() self.javascript('full_screen.save_margins()') if self.in_fullscreen_mode: self.switch_to_fullscreen_mode() if self.in_paged_mode: self.switch_to_paged_mode(last_loaded_path=last_loaded_path) self.read_anchor_positions(use_cache=False) evaljs = self.mainFrame().evaluateJavaScript for pl in self.all_viewer_plugins: pl.run_javascript(evaljs) self.first_load = False def colors(self): self.javascript(''' bs = getComputedStyle(document.body); py_bridge.value = [bs.backgroundColor, bs.color] ''') ans = self.bridge_value return (ans if isinstance(ans, list) else ['white', 'black']) def read_anchor_positions(self, use_cache=True): self.bridge_value = tuple(self.index_anchors) self.javascript(u''' py_bridge.value = book_indexing.anchor_positions(py_bridge.value, %s); '''%('true' if use_cache else 'false')) self.anchor_positions = 
self.bridge_value if not isinstance(self.anchor_positions, dict): # Some weird javascript error happened self.anchor_positions = {} return {k:tuple(v) for k, v in self.anchor_positions.iteritems()} def switch_to_paged_mode(self, onresize=False, last_loaded_path=None): if onresize and not self.loaded_javascript: return cols_per_screen = self.cols_per_screen_portrait if self.is_portrait else self.cols_per_screen_landscape cols_per_screen = max(1, min(5, cols_per_screen)) self.javascript(''' window.paged_display.use_document_margins = %s; window.paged_display.set_geometry(%d, %d, %d, %d); '''%( ('true' if self.use_book_margins else 'false'), cols_per_screen, self.top_margin, self.side_margin, self.bottom_margin )) force_fullscreen_layout = bool(getattr(last_loaded_path, 'is_single_page', False)) self.update_contents_size_for_paged_mode(force_fullscreen_layout) def update_contents_size_for_paged_mode(self, force_fullscreen_layout=None): # Setup the contents size to ensure that there is a right most margin. # Without this WebKit renders the final column with no margin, as the # columns extend beyond the boundaries (and margin) of body if force_fullscreen_layout is None: force_fullscreen_layout = self.javascript('window.paged_display.is_full_screen_layout', typ=bool) f = 'true' if force_fullscreen_layout else 'false' side_margin = self.javascript('window.paged_display.layout(%s)'%f, typ=int) mf = self.mainFrame() sz = mf.contentsSize() scroll_width = self.javascript('document.body.scrollWidth', int) # At this point sz.width() is not reliable, presumably because Qt # has not yet been updated if scroll_width > self.window_width: sz.setWidth(scroll_width+side_margin) self.setPreferredContentsSize(sz) self.javascript('window.paged_display.fit_images()') @property def column_boundaries(self): if not self.loaded_javascript: return (0, 1) self.javascript(u'py_bridge.value = paged_display.column_boundaries()') return tuple(self.bridge_value) def after_resize(self): if self.in_paged_mode: self.setPreferredContentsSize(QSize()) self.switch_to_paged_mode(onresize=True) self.javascript('if (window.mathjax) window.mathjax.after_resize();') def switch_to_fullscreen_mode(self): self.in_fullscreen_mode = True self.javascript('full_screen.on(%d, %d, %s)'%(self.max_fs_width, self.max_fs_height, 'true' if self.in_paged_mode else 'false')) def switch_to_window_mode(self): self.in_fullscreen_mode = False self.javascript('full_screen.off(%s)'%('true' if self.in_paged_mode else 'false')) @pyqtSlot(str) def debug(self, msg): prints(unicode(msg)) @pyqtSlot(int) def jump_to_cfi_finished(self, job_id): for l in self.jump_to_cfi_listeners: l(job_id) def reference_mode(self, enable): self.javascript(('enter' if enable else 'leave')+'_reference_mode()') def set_reference_prefix(self, prefix): self.javascript('reference_prefix = "%s"'%prefix) def goto(self, ref): self.javascript('goto_reference("%s")'%ref) def goto_bookmark(self, bm): if bm['type'] == 'legacy': bm = bm['pos'] bm = bm.strip() if bm.startswith('>'): bm = bm[1:].strip() self.javascript('scroll_to_bookmark("%s")'%bm) elif bm['type'] == 'cfi': self.page_position.to_pos(bm['pos']) def javascript(self, string, typ=None): ans = self.mainFrame().evaluateJavaScript(string) if typ in {'int', int}: try: return int(ans) except (TypeError, ValueError): return 0 if typ in {'float', float}: try: return float(ans) except (TypeError, ValueError): return 0.0 if typ == 'string': return ans or u'' if typ in {bool, 'bool'}: return bool(ans) return ans def 
javaScriptConsoleMessage(self, msg, lineno, msgid): if DEBUG or self.debug_javascript: prints(msg) def javaScriptAlert(self, frame, msg): if DEBUG: prints(msg) else: return QWebPage.javaScriptAlert(self, frame, msg) def scroll_by(self, dx=0, dy=0): self.mainFrame().scroll(dx, dy) def scroll_to(self, x=0, y=0): self.mainFrame().setScrollPosition(QPoint(x, y)) def jump_to_anchor(self, anchor): if not self.loaded_javascript: return self.javascript('window.paged_display.jump_to_anchor("%s")'%anchor) def element_ypos(self, elem): try: ans = int(elem.evaluateJavaScript('$(this).offset().top')) except (TypeError, ValueError): raise ValueError('No ypos found') return ans def elem_outer_xml(self, elem): return unicode(elem.toOuterXml()) def bookmark(self): pos = self.page_position.current_pos return {'type':'cfi', 'pos':pos} @property def at_bottom(self): return self.height - self.ypos <= self.window_height @property def at_top(self): return self.ypos <=0 def test(self): pass @property def ypos(self): return self.mainFrame().scrollPosition().y() @property def window_height(self): return self.javascript('window.innerHeight', 'int') @property def window_width(self): return self.javascript('window.innerWidth', 'int') @property def is_portrait(self): return self.window_width < self.window_height @property def xpos(self): return self.mainFrame().scrollPosition().x() @dynamic_property def scroll_fraction(self): def fget(self): if self.in_paged_mode: return self.javascript(''' ans = 0.0; if (window.paged_display) { ans = window.paged_display.current_pos(); } ans;''', typ='float') else: try: return abs(float(self.ypos)/(self.height-self.window_height)) except ZeroDivisionError: return 0. def fset(self, val): if self.in_paged_mode and self.loaded_javascript: self.javascript('paged_display.scroll_to_pos(%f)'%val) else: npos = val * (self.height - self.window_height) if npos < 0: npos = 0 self.scroll_to(x=self.xpos, y=npos) return property(fget=fget, fset=fset) @dynamic_property def page_number(self): ' The page number is the number of the page at the left most edge of the screen (starting from 0) ' def fget(self): if self.in_paged_mode: return self.javascript( 'ans = 0; if (window.paged_display) ans = window.paged_display.column_boundaries()[0]; ans;', typ='int') def fset(self, val): if self.in_paged_mode and self.loaded_javascript: self.javascript('if (window.paged_display) window.paged_display.scroll_to_column(%d)' % int(val)) return True return property(fget=fget, fset=fset) @property def page_dimensions(self): if self.in_paged_mode: return self.javascript( ''' ans = '' if (window.paged_display) ans = window.paged_display.col_width + ':' + window.paged_display.current_page_height; ans;''', typ='string') @property def hscroll_fraction(self): try: return float(self.xpos)/self.width except ZeroDivisionError: return 0. @property def height(self): # Note that document.body.offsetHeight does not include top and bottom # margins on body and in some cases does not include the top margin on # the first element inside body either. See ticket #8791 for an example # of the latter. 
q = self.mainFrame().contentsSize().height() if q < 0: # Don't know if this is still needed, but it can't hurt j = self.javascript('document.body.offsetHeight', 'int') if j >= 0: q = j return q @property def width(self): return self.mainFrame().contentsSize().width() # offsetWidth gives inaccurate results def set_bottom_padding(self, amount): s = QSize(-1, -1) if amount == 0 else QSize(self.viewportSize().width(), self.height+amount) self.setPreferredContentsSize(s) def extract_node(self): return unicode(self.mainFrame().evaluateJavaScript( 'window.calibre_extract.extract()')) # }}} class DocumentView(QWebView): # {{{ magnification_changed = pyqtSignal(object) DISABLED_BRUSH = QBrush(Qt.lightGray, Qt.Dense5Pattern) gesture_handler = lambda s, e: False last_loaded_path = None def initialize_view(self, debug_javascript=False): self.setRenderHints(QPainter.Antialiasing|QPainter.TextAntialiasing|QPainter.SmoothPixmapTransform) self.flipper = SlideFlip(self) self.gesture_handler = GestureHandler(self) self.is_auto_repeat_event = False self.debug_javascript = debug_javascript self.shortcuts = Shortcuts(SHORTCUTS, 'shortcuts/viewer') self.setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)) self._size_hint = QSize(510, 680) self.initial_pos = 0.0 self.to_bottom = False self.document = Document(self.shortcuts, parent=self, debug_javascript=debug_javascript) self.footnotes = Footnotes(self) self.document.settings_changed.connect(self.footnotes.clone_settings) self.setPage(self.document) self.inspector = WebInspector(self, self.document) self.manager = None self._reference_mode = False self._ignore_scrollbar_signals = False self.loading_url = None self.loadFinished.connect(self.load_finished) self.document.linkClicked.connect(self.link_clicked) self.document.linkHovered.connect(self.link_hovered) self.document.selectionChanged[()].connect(self.selection_changed) self.document.animated_scroll_done_signal.connect(self.animated_scroll_done, type=Qt.QueuedConnection) self.document.page_turn.connect(self.page_turn_requested) copy_action = self.copy_action copy_action.setIcon(QIcon(I('edit-copy.png'))) copy_action.triggered.connect(self.copy, Qt.QueuedConnection) d = self.document self.unimplemented_actions = list(map(self.pageAction, [d.DownloadImageToDisk, d.OpenLinkInNewWindow, d.DownloadLinkToDisk, d.OpenImageInNewWindow, d.OpenLink, d.Reload, d.InspectElement])) self.search_online_action = QAction(QIcon(I('search.png')), '', self) self.search_online_action.triggered.connect(self.search_online) self.addAction(self.search_online_action) self.dictionary_action = QAction(QIcon(I('dictionary.png')), _('&Lookup in dictionary'), self) self.dictionary_action.triggered.connect(self.lookup) self.addAction(self.dictionary_action) self.image_popup = ImagePopup(self) self.table_popup = TablePopup(self) self.view_image_action = QAction(QIcon(I('view-image.png')), _('View &image...'), self) self.view_image_action.triggered.connect(self.image_popup) self.view_table_action = QAction(QIcon(I('view.png')), _('View &table...'), self) self.view_table_action.triggered.connect(self.popup_table) self.search_action = QAction(QIcon(I('dictionary.png')), _('&Search for next occurrence'), self) self.search_action.triggered.connect(self.search_next) self.addAction(self.search_action) self.goto_location_action = QAction(_('Go to...'), self) self.goto_location_menu = m = QMenu(self) self.goto_location_actions = a = { 'Next Page': self.next_page, 'Previous Page': self.previous_page, 'Section Top' : 
partial(self.scroll_to, 0), 'Document Top': self.goto_document_start, 'Section Bottom':partial(self.scroll_to, 1), 'Document Bottom': self.goto_document_end, 'Next Section': self.goto_next_section, 'Previous Section': self.goto_previous_section, } for name, key in [(_('Next Section'), 'Next Section'), (_('Previous Section'), 'Previous Section'), (None, None), (_('Document Start'), 'Document Top'), (_('Document End'), 'Document Bottom'), (None, None), (_('Section Start'), 'Section Top'), (_('Section End'), 'Section Bottom'), (None, None), (_('Next Page'), 'Next Page'), (_('Previous Page'), 'Previous Page')]: if key is None: m.addSeparator() else: m.addAction(name, a[key], self.shortcuts.get_sequences(key)[0]) self.goto_location_action.setMenu(self.goto_location_menu) self.restore_fonts_action = QAction(_('Default font size'), self) self.restore_fonts_action.setCheckable(True) self.restore_fonts_action.triggered.connect(self.restore_font_size) def goto_next_section(self, *args): if self.manager is not None: self.manager.goto_next_section() def goto_previous_section(self, *args): if self.manager is not None: self.manager.goto_previous_section() def goto_document_start(self, *args): if self.manager is not None: self.manager.goto_start() def goto_document_end(self, *args): if self.manager is not None: self.manager.goto_end() @property def copy_action(self): return self.pageAction(self.document.Copy) def animated_scroll_done(self): if self.manager is not None: self.manager.scrolled(self.document.scroll_fraction) def reference_mode(self, enable): self._reference_mode = enable self.document.reference_mode(enable) def goto(self, ref): self.document.goto(ref) def goto_bookmark(self, bm): self.document.goto_bookmark(bm) def config(self, parent=None): self.document.do_config(parent) if self.document.in_fullscreen_mode: self.document.switch_to_fullscreen_mode() self.setFocus(Qt.OtherFocusReason) def load_theme(self, theme_id): themes = load_themes() theme = themes[theme_id] opts = config(theme).parse() self.document.apply_settings(opts) if self.document.in_fullscreen_mode: self.document.switch_to_fullscreen_mode() self.setFocus(Qt.OtherFocusReason) def bookmark(self): return self.document.bookmark() @property def selected_text(self): return self.document.selectedText().replace(u'\u00ad', u'').strip() def copy(self): self.document.triggerAction(self.document.Copy) c = QApplication.clipboard() md = c.mimeData() if iswindows: nmd = QMimeData() nmd.setHtml(md.html().replace(u'\u00ad', '')) md = nmd md.setText(self.selected_text) QApplication.clipboard().setMimeData(md) def selection_changed(self): if self.manager is not None: self.manager.selection_changed(self.selected_text) def _selectedText(self): t = unicode(self.selectedText()).strip() if not t: return u'' if len(t) > 40: t = t[:40] + u'...' 
t = t.replace(u'&', u'&&') return _("S&earch online for '%s'")%t def popup_table(self): html = self.document.extract_node() self.table_popup(html, QUrl.fromLocalFile(self.last_loaded_path), self.document.font_magnification_step) def contextMenuEvent(self, ev): from_touch = ev.reason() == ev.Other mf = self.document.mainFrame() r = mf.hitTestContent(ev.pos()) img = r.pixmap() elem = r.element() if elem.isNull(): elem = r.enclosingBlockElement() table = None parent = elem while not parent.isNull(): if (unicode(parent.tagName()) == u'table' or unicode(parent.localName()) == u'table'): table = parent break parent = parent.parent() self.image_popup.current_img = img self.image_popup.current_url = r.imageUrl() menu = self.document.createStandardContextMenu() for action in self.unimplemented_actions: menu.removeAction(action) if not img.isNull(): menu.addAction(self.view_image_action) if table is not None: self.document.mark_element.emit(table) menu.addAction(self.view_table_action) text = self._selectedText() if text and img.isNull(): self.search_online_action.setText(text) for x, sc in (('search_online', 'Search online'), ('dictionary', 'Lookup word'), ('search', 'Next occurrence')): ac = getattr(self, '%s_action' % x) menu.addAction(ac.icon(), '%s [%s]' % (unicode(ac.text()), ','.join(self.shortcuts.get_shortcuts(sc))), ac.trigger) if from_touch and self.manager is not None: word = unicode(mf.evaluateJavaScript('window.calibre_utils.word_at_point(%f, %f)' % (ev.pos().x(), ev.pos().y())) or '') if word: menu.addAction(self.dictionary_action.icon(), _('Lookup %s in the dictionary') % word, partial(self.manager.lookup, word)) menu.addAction(self.search_online_action.icon(), _('Search for %s online') % word, partial(self.do_search_online, word)) if not text and img.isNull(): menu.addSeparator() if self.manager.action_back.isEnabled(): menu.addAction(self.manager.action_back) if self.manager.action_forward.isEnabled(): menu.addAction(self.manager.action_forward) menu.addAction(self.goto_location_action) if self.manager is not None: menu.addSeparator() menu.addAction(self.manager.action_table_of_contents) menu.addSeparator() menu.addAction(self.manager.action_font_size_larger) self.restore_fonts_action.setChecked(self.multiplier == 1) menu.addAction(self.restore_fonts_action) menu.addAction(self.manager.action_font_size_smaller) menu.addSeparator() menu.addAction(_('Inspect'), self.inspect) if not text and img.isNull() and self.manager is not None: menu.addSeparator() if (not self.document.show_controls or self.document.in_fullscreen_mode) and self.manager is not None: menu.addAction(self.manager.toggle_toolbar_action) menu.addAction(self.manager.action_full_screen) menu.addSeparator() menu.addAction(self.manager.action_reload) menu.addAction(self.manager.action_quit) for plugin in self.document.all_viewer_plugins: plugin.customize_context_menu(menu, ev, r) if from_touch: from calibre.constants import plugins pi = plugins['progress_indicator'][0] for x in (menu, self.goto_location_menu): if hasattr(pi, 'set_touch_menu_style'): pi.set_touch_menu_style(x) helpt = QAction(QIcon(I('help.png')), _('Show supported touch screen gestures'), menu) helpt.triggered.connect(self.gesture_handler.show_help) menu.insertAction(menu.actions()[0], helpt) else: self.goto_location_menu.setStyle(self.style()) self.context_menu = menu menu.exec_(ev.globalPos()) def inspect(self): self.inspector.show() self.inspector.raise_() self.pageAction(self.document.InspectElement).trigger() def lookup(self, *args): if self.manager 
is not None: t = unicode(self.selectedText()).strip() if t: self.manager.lookup(t.split()[0]) def search_next(self): if self.manager is not None: t = unicode(self.selectedText()).strip() if t: self.manager.search.set_search_string(t) def search_online(self): t = unicode(self.selectedText()).strip() if t: self.do_search_online(t) def do_search_online(self, text): url = self.document.search_online_url.replace('{text}', QUrl().toPercentEncoding(text)) if not isinstance(url, bytes): url = url.encode('utf-8') open_url(QUrl.fromEncoded(url)) def set_manager(self, manager): self.manager = manager self.scrollbar = manager.horizontal_scrollbar self.scrollbar.valueChanged[(int)].connect(self.scroll_horizontally) def scroll_horizontally(self, amount): self.document.scroll_to(y=self.document.ypos, x=amount) @property def scroll_pos(self): return (self.document.ypos, self.document.ypos + self.document.window_height) @property def viewport_rect(self): # (left, top, right, bottom) of the viewport in document co-ordinates # When in paged mode, left and right are the numbers of the columns # at the left edge and *after* the right edge of the viewport d = self.document if d.in_paged_mode: try: l, r = d.column_boundaries except ValueError: l, r = (0, 1) else: l, r = d.xpos, d.xpos + d.window_width return (l, d.ypos, r, d.ypos + d.window_height) def link_hovered(self, link, text, context): link, text = unicode(link), unicode(text) if link: self.setCursor(Qt.PointingHandCursor) else: self.unsetCursor() def link_clicked(self, url): if self.manager is not None: self.manager.link_clicked(url) def sizeHint(self): return self._size_hint @dynamic_property def scroll_fraction(self): def fget(self): return self.document.scroll_fraction def fset(self, val): self.document.scroll_fraction = float(val) return property(fget=fget, fset=fset) @property def hscroll_fraction(self): return self.document.hscroll_fraction @property def content_size(self): return self.document.width, self.document.height @dynamic_property def current_language(self): def fget(self): return self.document.current_language def fset(self, val): self.document.current_language = val return property(fget=fget, fset=fset) def search(self, text, backwards=False): flags = self.document.FindBackward if backwards else self.document.FindFlags(0) found = self.document.findText(text, flags) if found and self.document.in_paged_mode: self.document.javascript('paged_display.snap_to_selection()') return found def path(self): return os.path.abspath(unicode(self.url().toLocalFile())) def load_path(self, path, pos=0.0): self.initial_pos = pos self.last_loaded_path = path # This is needed otherwise percentage margins on body are not correctly # evaluated in read_document_margins() in paged mode. 
self.document.setPreferredContentsSize(QSize()) def callback(lu): self.loading_url = lu if self.manager is not None: self.manager.load_started() load_html(path, self, codec=getattr(path, 'encoding', 'utf-8'), mime_type=getattr(path, 'mime_type', 'text/html'), pre_load_callback=callback) entries = set() for ie in getattr(path, 'index_entries', []): if ie.start_anchor: entries.add(ie.start_anchor) if ie.end_anchor: entries.add(ie.end_anchor) self.document.index_anchors = entries def initialize_scrollbar(self): if getattr(self, 'scrollbar', None) is not None: if self.document.in_paged_mode: self.scrollbar.setVisible(False) return delta = self.document.width - self.size().width() if delta > 0: self._ignore_scrollbar_signals = True self.scrollbar.blockSignals(True) self.scrollbar.setRange(0, delta) self.scrollbar.setValue(0) self.scrollbar.setSingleStep(1) self.scrollbar.setPageStep(int(delta/10.)) self.scrollbar.setVisible(delta > 0) self.scrollbar.blockSignals(False) self._ignore_scrollbar_signals = False def load_finished(self, ok): if self.loading_url is None: # An <iframe> finished loading return self.loading_url = None self.document.load_javascript_libraries() self.document.after_load(self.last_loaded_path) self._size_hint = self.document.mainFrame().contentsSize() scrolled = False if self.to_bottom: self.to_bottom = False self.initial_pos = 1.0 if self.initial_pos > 0.0: scrolled = True self.scroll_to(self.initial_pos, notify=False) self.initial_pos = 0.0 self.update() self.initialize_scrollbar() self.document.reference_mode(self._reference_mode) if self.manager is not None: spine_index = self.manager.load_finished(bool(ok)) if spine_index > -1: self.document.set_reference_prefix('%d.'%(spine_index+1)) if scrolled: self.manager.scrolled(self.document.scroll_fraction, onload=True) if self.flipper.isVisible(): if self.flipper.running: self.flipper.setVisible(False) else: self.flipper(self.current_page_image(), duration=self.document.page_flip_duration) @classmethod def test_line(cls, img, y): 'Test if line contains pixels of exactly the same color' start = img.pixel(0, y) for i in range(1, img.width()): if img.pixel(i, y) != start: return False return True def current_page_image(self, overlap=-1): if overlap < 0: overlap = self.height() img = QImage(self.width(), overlap, QImage.Format_ARGB32_Premultiplied) painter = QPainter(img) painter.setRenderHints(self.renderHints()) self.document.mainFrame().render(painter, QRegion(0, 0, self.width(), overlap)) painter.end() return img def find_next_blank_line(self, overlap): img = self.current_page_image(overlap) for i in range(overlap-1, -1, -1): if self.test_line(img, i): self.scroll_by(y=i, notify=False) return self.scroll_by(y=overlap) def previous_page(self): if self.flipper.running and not self.is_auto_repeat_event: return if self.loading_url is not None: return epf = self.document.enable_page_flip and not self.is_auto_repeat_event if self.document.in_paged_mode: loc = self.document.javascript( 'paged_display.previous_screen_location()', typ='int') if loc < 0: if self.manager is not None: if epf: self.flipper.initialize(self.current_page_image(), forwards=False) self.manager.previous_document() else: if epf: self.flipper.initialize(self.current_page_image(), forwards=False) self.document.scroll_to(x=loc, y=0) if epf: self.flipper(self.current_page_image(), duration=self.document.page_flip_duration) if self.manager is not None: self.manager.scrolled(self.scroll_fraction) return delta_y = self.document.window_height - 25 if 
self.document.at_top: if self.manager is not None: self.to_bottom = True if epf: self.flipper.initialize(self.current_page_image(), False) self.manager.previous_document() else: opos = self.document.ypos upper_limit = opos - delta_y if upper_limit < 0: upper_limit = 0 if upper_limit < opos: if epf: self.flipper.initialize(self.current_page_image(), forwards=False) self.document.scroll_to(self.document.xpos, upper_limit) if epf: self.flipper(self.current_page_image(), duration=self.document.page_flip_duration) if self.manager is not None: self.manager.scrolled(self.scroll_fraction) def next_page(self): if self.flipper.running and not self.is_auto_repeat_event: return if self.loading_url is not None: return epf = self.document.enable_page_flip and not self.is_auto_repeat_event if self.document.in_paged_mode: loc = self.document.javascript( 'paged_display.next_screen_location()', typ='int') if loc < 0: if self.manager is not None: if epf: self.flipper.initialize(self.current_page_image()) self.manager.next_document() else: if epf: self.flipper.initialize(self.current_page_image()) self.document.scroll_to(x=loc, y=0) if epf: self.flipper(self.current_page_image(), duration=self.document.page_flip_duration) if self.manager is not None: self.manager.scrolled(self.scroll_fraction) return window_height = self.document.window_height document_height = self.document.height ddelta = document_height - window_height # print '\nWindow height:', window_height # print 'Document height:', self.document.height delta_y = window_height - 25 if self.document.at_bottom or ddelta <= 0: if self.manager is not None: if epf: self.flipper.initialize(self.current_page_image()) self.manager.next_document() elif ddelta < 25: self.scroll_by(y=ddelta) return else: oopos = self.document.ypos # print 'Original position:', oopos self.document.set_bottom_padding(0) opos = self.document.ypos # print 'After set padding=0:', self.document.ypos if opos < oopos: if self.manager is not None: if epf: self.flipper.initialize(self.current_page_image()) self.manager.next_document() return # oheight = self.document.height lower_limit = opos + delta_y # Max value of top y co-ord after scrolling max_y = self.document.height - window_height # The maximum possible top y co-ord if max_y < lower_limit: padding = lower_limit - max_y if padding == window_height: if self.manager is not None: if epf: self.flipper.initialize(self.current_page_image()) self.manager.next_document() return # print 'Setting padding to:', lower_limit - max_y self.document.set_bottom_padding(lower_limit - max_y) if epf: self.flipper.initialize(self.current_page_image()) # print 'Document height:', self.document.height # print 'Height change:', (self.document.height - oheight) max_y = self.document.height - window_height lower_limit = min(max_y, lower_limit) # print 'Scroll to:', lower_limit if lower_limit > opos: self.document.scroll_to(self.document.xpos, lower_limit) actually_scrolled = self.document.ypos - opos # print 'After scroll pos:', self.document.ypos # print 'Scrolled by:', self.document.ypos - opos self.find_next_blank_line(window_height - actually_scrolled) # print 'After blank line pos:', self.document.ypos if epf: self.flipper(self.current_page_image(), duration=self.document.page_flip_duration) if self.manager is not None: self.manager.scrolled(self.scroll_fraction) # print 'After all:', self.document.ypos def page_turn_requested(self, backwards): if backwards: self.previous_page() else: self.next_page() def scroll_by(self, x=0, y=0, notify=True): 
old_pos = (self.document.xpos if self.document.in_paged_mode else self.document.ypos) self.document.scroll_by(x, y) new_pos = (self.document.xpos if self.document.in_paged_mode else self.document.ypos) if notify and self.manager is not None and new_pos != old_pos: self.manager.scrolled(self.scroll_fraction) def scroll_to(self, pos, notify=True): if self._ignore_scrollbar_signals: return old_pos = (self.document.xpos if self.document.in_paged_mode else self.document.ypos) if self.document.in_paged_mode: if isinstance(pos, basestring): self.document.jump_to_anchor(pos) else: self.document.scroll_fraction = pos else: if isinstance(pos, basestring): self.document.jump_to_anchor(pos) else: if pos >= 1: self.document.scroll_to(0, self.document.height) else: y = int(math.ceil( pos*(self.document.height-self.document.window_height))) self.document.scroll_to(0, y) new_pos = (self.document.xpos if self.document.in_paged_mode else self.document.ypos) if notify and self.manager is not None and new_pos != old_pos: self.manager.scrolled(self.scroll_fraction) @dynamic_property def multiplier(self): def fget(self): return self.zoomFactor() def fset(self, val): oval = self.zoomFactor() self.setZoomFactor(val) if val != oval: if self.document.in_paged_mode: self.document.update_contents_size_for_paged_mode() self.magnification_changed.emit(val) return property(fget=fget, fset=fset) def magnify_fonts(self, amount=None): if amount is None: amount = self.document.font_magnification_step with self.document.page_position: self.multiplier += amount return self.document.scroll_fraction def shrink_fonts(self, amount=None): if amount is None: amount = self.document.font_magnification_step if self.multiplier >= amount: with self.document.page_position: self.multiplier -= amount return self.document.scroll_fraction def restore_font_size(self): with self.document.page_position: self.multiplier = 1 return self.document.scroll_fraction def changeEvent(self, event): if event.type() == event.EnabledChange: self.update() return QWebView.changeEvent(self, event) def paintEvent(self, event): painter = QPainter(self) painter.setRenderHints(self.renderHints()) self.document.mainFrame().render(painter, event.region()) if not self.isEnabled(): painter.fillRect(event.region().boundingRect(), self.DISABLED_BRUSH) painter.end() def wheelEvent(self, event): if event.phase() not in (Qt.ScrollUpdate, 0): # 0 is Qt.NoScrollPhase which is not yet available in PyQt return mods = event.modifiers() num_degrees = event.angleDelta().y() // 8 if mods & Qt.CTRL: if self.manager is not None and num_degrees != 0: (self.manager.font_size_larger if num_degrees > 0 else self.manager.font_size_smaller)() return if self.document.in_paged_mode: if abs(num_degrees) < 15: return typ = 'screen' if self.document.wheel_flips_pages else 'col' direction = 'next' if num_degrees < 0 else 'previous' loc = self.document.javascript('paged_display.%s_%s_location()'%( direction, typ), typ='int') if loc > -1: self.document.scroll_to(x=loc, y=0) if self.manager is not None: self.manager.scrolled(self.scroll_fraction) event.accept() elif self.manager is not None: if direction == 'next': self.manager.next_document() else: self.manager.previous_document() event.accept() return if num_degrees < -14: if self.document.wheel_flips_pages: self.next_page() event.accept() return if self.document.at_bottom: self.scroll_by(y=15) # at_bottom can lie on windows if self.manager is not None: self.manager.next_document() event.accept() return elif num_degrees > 14: if 
self.document.wheel_flips_pages: self.previous_page() event.accept() return if self.document.at_top: if self.manager is not None: self.manager.previous_document() event.accept() return ret = QWebView.wheelEvent(self, event) num_degrees_h = event.angleDelta().x() // 8 vertical = abs(num_degrees) > abs(num_degrees_h) scroll_amount = ((num_degrees if vertical else num_degrees_h)/ 120.0) * .2 * -1 * 8 dim = self.document.viewportSize().height() if vertical else self.document.viewportSize().width() amt = dim * scroll_amount mult = -1 if amt < 0 else 1 if self.document.wheel_scroll_fraction != 100: amt = mult * max(1, abs(int(amt * self.document.wheel_scroll_fraction / 100.))) self.scroll_by(0, amt) if vertical else self.scroll_by(amt, 0) if self.manager is not None: self.manager.scrolled(self.scroll_fraction) return ret def keyPressEvent(self, event): if not self.handle_key_press(event): return QWebView.keyPressEvent(self, event) def paged_col_scroll(self, forward=True, scroll_past_end=True): dir = 'next' if forward else 'previous' loc = self.document.javascript( 'paged_display.%s_col_location()'%dir, typ='int') if loc > -1: self.document.scroll_to(x=loc, y=0) self.manager.scrolled(self.document.scroll_fraction) elif scroll_past_end: (self.manager.next_document() if forward else self.manager.previous_document()) def handle_key_press(self, event): handled = True key = self.shortcuts.get_match(event) func = self.goto_location_actions.get(key, None) if func is not None: self.is_auto_repeat_event = event.isAutoRepeat() try: func() finally: self.is_auto_repeat_event = False elif key == 'Down': if self.document.in_paged_mode: self.paged_col_scroll(scroll_past_end=not self.document.line_scrolling_stops_on_pagebreaks) else: if (not self.document.line_scrolling_stops_on_pagebreaks and self.document.at_bottom): self.manager.next_document() else: amt = int((self.document.line_scroll_fraction / 100.) * 15) self.scroll_by(y=amt) elif key == 'Up': if self.document.in_paged_mode: self.paged_col_scroll(forward=False, scroll_past_end=not self.document.line_scrolling_stops_on_pagebreaks) else: if (not self.document.line_scrolling_stops_on_pagebreaks and self.document.at_top): self.manager.previous_document() else: amt = int((self.document.line_scroll_fraction / 100.) * 15) self.scroll_by(y=-amt) elif key == 'Left': if self.document.in_paged_mode: self.paged_col_scroll(forward=False) else: amt = int((self.document.line_scroll_fraction / 100.) * 15) self.scroll_by(x=-amt) elif key == 'Right': if self.document.in_paged_mode: self.paged_col_scroll() else: amt = int((self.document.line_scroll_fraction / 100.) 
* 15) self.scroll_by(x=amt) elif key == 'Back': if self.manager is not None: self.manager.back(None) elif key == 'Forward': if self.manager is not None: self.manager.forward(None) elif event.matches(QKeySequence.Copy): self.copy() else: handled = False return handled def resizeEvent(self, event): if self.manager is not None: self.manager.viewport_resize_started(event) return QWebView.resizeEvent(self, event) def event(self, ev): if self.gesture_handler(ev): return True return QWebView.event(self, ev) def mouseMoveEvent(self, ev): if self.document.in_paged_mode and ev.buttons() & Qt.LeftButton and not self.rect().contains(ev.pos(), True): # Prevent this event from causing WebKit to scroll the viewport # See https://bugs.launchpad.net/bugs/1464862 return return QWebView.mouseMoveEvent(self, ev) def mouseReleaseEvent(self, ev): r = self.document.mainFrame().hitTestContent(ev.pos()) a, url = r.linkElement(), r.linkUrl() if url.isValid() and not a.isNull() and self.manager is not None: fd = self.footnotes.get_footnote_data(a, url) if fd: self.footnotes.show_footnote(fd) self.manager.show_footnote_view() ev.accept() return opos = self.document.ypos if self.manager is not None: prev_pos = self.manager.update_page_number() ret = QWebView.mouseReleaseEvent(self, ev) if self.manager is not None and opos != self.document.ypos: self.manager.scrolled(self.scroll_fraction) self.manager.internal_link_clicked(prev_pos) return ret def follow_footnote_link(self): qurl = self.footnotes.showing_url if qurl and qurl.isValid(): self.link_clicked(qurl) # }}}
./CrossVul/dataset_final_sorted/CWE-264/py/bad_4833_1
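The viewer code above turns one wheel notch into a scroll of roughly 20% of the viewport, then rescales that by the user's wheel_scroll_fraction preference while clamping to at least one pixel and preserving direction. A minimal standalone sketch of that arithmetic, with plain numbers standing in for the Qt event and document objects (the function name is hypothetical, not part of the viewer's API):

def wheel_scroll_amount(viewport_dim, num_degrees, wheel_scroll_fraction=100):
    """Convert a wheel delta (already divided by 8, i.e. in degrees) into a
    signed pixel scroll amount, scaled by a user preference."""
    # One notch is 15 degrees; the *8/120 pair makes a notch scroll 20% of
    # the viewport, and the -1 flips the sign so wheel-up scrolls backwards.
    scroll_amount = (num_degrees / 120.0) * .2 * -1 * 8
    amt = viewport_dim * scroll_amount
    if wheel_scroll_fraction != 100:
        mult = -1 if amt < 0 else 1
        # Apply the percentage preference, but never round down to zero pixels.
        amt = mult * max(1, abs(int(amt * wheel_scroll_fraction / 100.)))
    return amt

print(wheel_scroll_amount(800, 15, wheel_scroll_fraction=50))  # -> -80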
crossvul-python_data_good_3695_4
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Main entry point into the Token service.""" import datetime from keystone import config from keystone import exception from keystone.common import manager CONF = config.CONF config.register_int('expiration', group='token', default=86400) class Manager(manager.Manager): """Default pivot point for the Token backend. See :mod:`keystone.common.manager.Manager` for more details on how this dynamically calls the backend. """ def __init__(self): super(Manager, self).__init__(CONF.token.driver) class Driver(object): """Interface description for a Token driver.""" def get_token(self, token_id): """Get a token by id. :param token_id: identity of the token :type token_id: string :returns: token_ref :raises: keystone.exception.TokenNotFound """ raise exception.NotImplemented() def create_token(self, token_id, data): """Create a token by id and data. :param token_id: identity of the token :type token_id: string :param data: dictionary with additional reference information :: { expires='' id=token_id, user=user_ref, tenant=tenant_ref, metadata=metadata_ref } :type data: dict :returns: token_ref or None. """ raise exception.NotImplemented() def delete_token(self, token_id): """Deletes a token by id. :param token_id: identity of the token :type token_id: string :returns: None. :raises: keystone.exception.TokenNotFound """ raise exception.NotImplemented() def list_tokens(self, user_id): """Returns a list of current token_id's for a user :param user_id: identity of the user :type user_id: string :returns: list of token_id's """ raise exception.NotImplemented() def _get_default_expire_time(self): """Determine when a token should expire based on the config. :returns: a naive utc datetime.datetime object """ expire_delta = datetime.timedelta(seconds=CONF.token.expiration) return datetime.datetime.utcnow() + expire_delta
./CrossVul/dataset_final_sorted/CWE-264/py/good_3695_4
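The token Driver above leaves storage abstract but computes default expiry concretely: _get_default_expire_time adds the configured lifetime to the current UTC time. A self-contained sketch of the same calculation, with the 86400-second default inlined rather than read from CONF.token.expiration (an assumption made to avoid keystone's config machinery):

import datetime

TOKEN_EXPIRATION_SECONDS = 86400  # mirrors the default registered above

def default_expire_time(expiration=TOKEN_EXPIRATION_SECONDS):
    """Return the naive UTC datetime at which a new token should expire."""
    return datetime.datetime.utcnow() + datetime.timedelta(seconds=expiration)

print(default_expire_time())  # current UTC time plus one day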
crossvul-python_data_bad_3634_1
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import webob from nova.api.openstack import wsgi from nova.api.openstack import xmlutil from nova.api.openstack import extensions from nova.db.sqlalchemy import api as sqlalchemy_api from nova import db from nova import exception from nova import quota authorize = extensions.extension_authorizer('compute', 'quotas') quota_resources = ['metadata_items', 'injected_file_content_bytes', 'volumes', 'gigabytes', 'ram', 'floating_ips', 'instances', 'injected_files', 'cores'] class QuotaTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('quota_set', selector='quota_set') root.set('id') for resource in quota_resources: elem = xmlutil.SubTemplateElement(root, resource) elem.text = resource return xmlutil.MasterTemplate(root, 1) class QuotaSetsController(object): def _format_quota_set(self, project_id, quota_set): """Convert the quota object to a result dict""" result = dict(id=str(project_id)) for resource in quota_resources: result[resource] = quota_set[resource] return dict(quota_set=result) @wsgi.serializers(xml=QuotaTemplate) def show(self, req, id): context = req.environ['nova.context'] authorize(context) try: sqlalchemy_api.authorize_project_context(context, id) return self._format_quota_set(id, quota.get_project_quotas(context, id)) except exception.NotAuthorized: raise webob.exc.HTTPForbidden() @wsgi.serializers(xml=QuotaTemplate) def update(self, req, id, body): context = req.environ['nova.context'] authorize(context) project_id = id for key in body['quota_set'].keys(): if key in quota_resources: value = int(body['quota_set'][key]) try: db.quota_update(context, project_id, key, value) except exception.ProjectQuotaNotFound: db.quota_create(context, project_id, key, value) except exception.AdminRequired: raise webob.exc.HTTPForbidden() return {'quota_set': quota.get_project_quotas(context, project_id)} @wsgi.serializers(xml=QuotaTemplate) def defaults(self, req, id): authorize(req.environ['nova.context']) return self._format_quota_set(id, quota._get_default_quotas()) class Quotas(extensions.ExtensionDescriptor): """Quotas management support""" name = "Quotas" alias = "os-quota-sets" namespace = "http://docs.openstack.org/compute/ext/quotas-sets/api/v1.1" updated = "2011-08-08T00:00:00+00:00" def get_resources(self): resources = [] res = extensions.ResourceExtension('os-quota-sets', QuotaSetsController(), member_actions={'defaults': 'GET'}) resources.append(res) return resources
./CrossVul/dataset_final_sorted/CWE-264/py/bad_3634_1
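QuotaSetsController.update above uses an update-or-create pattern: each recognized resource limit is cast to int, db.quota_update is attempted, and db.quota_create is the fallback when the project has no existing row. A rough sketch of that flow against an in-memory store (the names here are illustrative stand-ins, not nova's db API):

quota_resources = ['instances', 'cores', 'ram']
_store = {}  # maps (project_id, resource) -> limit


class ProjectQuotaNotFound(Exception):
    pass


def quota_update(project_id, resource, limit):
    if (project_id, resource) not in _store:
        raise ProjectQuotaNotFound()
    _store[(project_id, resource)] = limit


def quota_create(project_id, resource, limit):
    _store[(project_id, resource)] = limit


def update_quota_set(project_id, body):
    for key, raw in body['quota_set'].items():
        if key not in quota_resources:
            continue  # unknown resources are silently skipped, as above
        value = int(raw)
        try:
            quota_update(project_id, key, value)
        except ProjectQuotaNotFound:
            quota_create(project_id, key, value)


update_quota_set('proj-1', {'quota_set': {'cores': '20', 'bogus': '1'}})
print(_store)  # {('proj-1', 'cores'): 20}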
crossvul-python_data_good_3634_2
# Copyright 2011 OpenStack LLC. # Copyright 2012 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The security groups extension.""" import urllib from xml.dom import minidom from webob import exc import webob from nova.api.openstack import common from nova.api.openstack import extensions from nova.api.openstack import wsgi from nova.api.openstack import xmlutil from nova import compute from nova import db from nova import exception from nova import flags from nova import log as logging from nova import quota from nova import utils LOG = logging.getLogger(__name__) FLAGS = flags.FLAGS authorize = extensions.extension_authorizer('compute', 'security_groups') def make_rule(elem): elem.set('id') elem.set('parent_group_id') proto = xmlutil.SubTemplateElement(elem, 'ip_protocol') proto.text = 'ip_protocol' from_port = xmlutil.SubTemplateElement(elem, 'from_port') from_port.text = 'from_port' to_port = xmlutil.SubTemplateElement(elem, 'to_port') to_port.text = 'to_port' group = xmlutil.SubTemplateElement(elem, 'group', selector='group') name = xmlutil.SubTemplateElement(group, 'name') name.text = 'name' tenant_id = xmlutil.SubTemplateElement(group, 'tenant_id') tenant_id.text = 'tenant_id' ip_range = xmlutil.SubTemplateElement(elem, 'ip_range', selector='ip_range') cidr = xmlutil.SubTemplateElement(ip_range, 'cidr') cidr.text = 'cidr' def make_sg(elem): elem.set('id') elem.set('tenant_id') elem.set('name') desc = xmlutil.SubTemplateElement(elem, 'description') desc.text = 'description' rules = xmlutil.SubTemplateElement(elem, 'rules') rule = xmlutil.SubTemplateElement(rules, 'rule', selector='rules') make_rule(rule) sg_nsmap = {None: wsgi.XMLNS_V11} class SecurityGroupRuleTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('security_group_rule', selector='security_group_rule') make_rule(root) return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap) class SecurityGroupTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('security_group', selector='security_group') make_sg(root) return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap) class SecurityGroupsTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('security_groups') elem = xmlutil.SubTemplateElement(root, 'security_group', selector='security_groups') make_sg(elem) return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap) class SecurityGroupXMLDeserializer(wsgi.MetadataXMLDeserializer): """ Deserializer to handle xml-formatted security group requests. 
""" def default(self, string): """Deserialize an xml-formatted security group create request""" dom = minidom.parseString(string) security_group = {} sg_node = self.find_first_child_named(dom, 'security_group') if sg_node is not None: if sg_node.hasAttribute('name'): security_group['name'] = sg_node.getAttribute('name') desc_node = self.find_first_child_named(sg_node, "description") if desc_node: security_group['description'] = self.extract_text(desc_node) return {'body': {'security_group': security_group}} class SecurityGroupRulesXMLDeserializer(wsgi.MetadataXMLDeserializer): """ Deserializer to handle xml-formatted security group requests. """ def default(self, string): """Deserialize an xml-formatted security group create request""" dom = minidom.parseString(string) security_group_rule = self._extract_security_group_rule(dom) return {'body': {'security_group_rule': security_group_rule}} def _extract_security_group_rule(self, node): """Marshal the security group rule attribute of a parsed request""" sg_rule = {} sg_rule_node = self.find_first_child_named(node, 'security_group_rule') if sg_rule_node is not None: ip_protocol_node = self.find_first_child_named(sg_rule_node, "ip_protocol") if ip_protocol_node is not None: sg_rule['ip_protocol'] = self.extract_text(ip_protocol_node) from_port_node = self.find_first_child_named(sg_rule_node, "from_port") if from_port_node is not None: sg_rule['from_port'] = self.extract_text(from_port_node) to_port_node = self.find_first_child_named(sg_rule_node, "to_port") if to_port_node is not None: sg_rule['to_port'] = self.extract_text(to_port_node) parent_group_id_node = self.find_first_child_named(sg_rule_node, "parent_group_id") if parent_group_id_node is not None: sg_rule['parent_group_id'] = self.extract_text( parent_group_id_node) group_id_node = self.find_first_child_named(sg_rule_node, "group_id") if group_id_node is not None: sg_rule['group_id'] = self.extract_text(group_id_node) cidr_node = self.find_first_child_named(sg_rule_node, "cidr") if cidr_node is not None: sg_rule['cidr'] = self.extract_text(cidr_node) return sg_rule class SecurityGroupControllerBase(object): """Base class for Security Group controllers.""" def __init__(self): self.compute_api = compute.API() self.sgh = utils.import_object(FLAGS.security_group_handler) def _format_security_group_rule(self, context, rule): sg_rule = {} sg_rule['id'] = rule.id sg_rule['parent_group_id'] = rule.parent_group_id sg_rule['ip_protocol'] = rule.protocol sg_rule['from_port'] = rule.from_port sg_rule['to_port'] = rule.to_port sg_rule['group'] = {} sg_rule['ip_range'] = {} if rule.group_id: source_group = db.security_group_get(context, rule.group_id) sg_rule['group'] = {'name': source_group.name, 'tenant_id': source_group.project_id} else: sg_rule['ip_range'] = {'cidr': rule.cidr} return sg_rule def _format_security_group(self, context, group): security_group = {} security_group['id'] = group.id security_group['description'] = group.description security_group['name'] = group.name security_group['tenant_id'] = group.project_id security_group['rules'] = [] for rule in group.rules: security_group['rules'] += [self._format_security_group_rule( context, rule)] return security_group class SecurityGroupController(SecurityGroupControllerBase): """The Security group API controller for the OpenStack API.""" def _get_security_group(self, context, id): try: id = int(id) security_group = db.security_group_get(context, id) except ValueError: msg = _("Security group id should be integer") raise 
exc.HTTPBadRequest(explanation=msg) except exception.NotFound as exp: raise exc.HTTPNotFound(explanation=unicode(exp)) return security_group @wsgi.serializers(xml=SecurityGroupTemplate) def show(self, req, id): """Return data about the given security group.""" context = req.environ['nova.context'] authorize(context) security_group = self._get_security_group(context, id) return {'security_group': self._format_security_group(context, security_group)} def delete(self, req, id): """Delete a security group.""" context = req.environ['nova.context'] authorize(context) security_group = self._get_security_group(context, id) if db.security_group_in_use(context, security_group.id): msg = _("Security group is still in use") raise exc.HTTPBadRequest(explanation=msg) LOG.audit(_("Delete security group %s"), id, context=context) db.security_group_destroy(context, security_group.id) self.sgh.trigger_security_group_destroy_refresh( context, security_group.id) return webob.Response(status_int=202) @wsgi.serializers(xml=SecurityGroupsTemplate) def index(self, req): """Returns a list of security groups""" context = req.environ['nova.context'] authorize(context) self.compute_api.ensure_default_security_group(context) groups = db.security_group_get_by_project(context, context.project_id) limited_list = common.limited(groups, req) result = [self._format_security_group(context, group) for group in limited_list] return {'security_groups': list(sorted(result, key=lambda k: (k['tenant_id'], k['name'])))} @wsgi.serializers(xml=SecurityGroupTemplate) @wsgi.deserializers(xml=SecurityGroupXMLDeserializer) def create(self, req, body): """Creates a new security group.""" context = req.environ['nova.context'] authorize(context) if not body: raise exc.HTTPUnprocessableEntity() security_group = body.get('security_group', None) if security_group is None: raise exc.HTTPUnprocessableEntity() group_name = security_group.get('name', None) group_description = security_group.get('description', None) self._validate_security_group_property(group_name, "name") self._validate_security_group_property(group_description, "description") group_name = group_name.strip() group_description = group_description.strip() if quota.allowed_security_groups(context, 1) < 1: msg = _("Quota exceeded, too many security groups.") raise exc.HTTPBadRequest(explanation=msg) LOG.audit(_("Create Security Group %s"), group_name, context=context) self.compute_api.ensure_default_security_group(context) if db.security_group_exists(context, context.project_id, group_name): msg = _('Security group %s already exists') % group_name raise exc.HTTPBadRequest(explanation=msg) group = {'user_id': context.user_id, 'project_id': context.project_id, 'name': group_name, 'description': group_description} group_ref = db.security_group_create(context, group) self.sgh.trigger_security_group_create_refresh(context, group) return {'security_group': self._format_security_group(context, group_ref)} def _validate_security_group_property(self, value, typ): """ typ will be either 'name' or 'description', depending on the caller """ try: val = value.strip() except AttributeError: msg = _("Security group %s is not a string or unicode") % typ raise exc.HTTPBadRequest(explanation=msg) if not val: msg = _("Security group %s cannot be empty.") % typ raise exc.HTTPBadRequest(explanation=msg) if len(val) > 255: msg = _("Security group %s should not be greater " "than 255 characters.") % typ raise exc.HTTPBadRequest(explanation=msg) class 
SecurityGroupRulesController(SecurityGroupControllerBase): @wsgi.serializers(xml=SecurityGroupRuleTemplate) @wsgi.deserializers(xml=SecurityGroupRulesXMLDeserializer) def create(self, req, body): context = req.environ['nova.context'] authorize(context) if not body: raise exc.HTTPUnprocessableEntity() if not 'security_group_rule' in body: raise exc.HTTPUnprocessableEntity() self.compute_api.ensure_default_security_group(context) sg_rule = body['security_group_rule'] parent_group_id = sg_rule.get('parent_group_id', None) try: parent_group_id = int(parent_group_id) security_group = db.security_group_get(context, parent_group_id) except ValueError: msg = _("Parent group id is not integer") raise exc.HTTPBadRequest(explanation=msg) except exception.NotFound as exp: msg = _("Security group (%s) not found") % parent_group_id raise exc.HTTPNotFound(explanation=msg) msg = _("Authorize security group ingress %s") LOG.audit(msg, security_group['name'], context=context) try: values = self._rule_args_to_dict(context, to_port=sg_rule.get('to_port'), from_port=sg_rule.get('from_port'), parent_group_id=sg_rule.get('parent_group_id'), ip_protocol=sg_rule.get('ip_protocol'), cidr=sg_rule.get('cidr'), group_id=sg_rule.get('group_id')) except Exception as exp: raise exc.HTTPBadRequest(explanation=unicode(exp)) if values is None: msg = _("Not enough parameters to build a " "valid rule.") raise exc.HTTPBadRequest(explanation=msg) values['parent_group_id'] = security_group.id if self._security_group_rule_exists(security_group, values): msg = _('This rule already exists in group %s') % parent_group_id raise exc.HTTPBadRequest(explanation=msg) allowed = quota.allowed_security_group_rules(context, parent_group_id, 1) if allowed < 1: msg = _("Quota exceeded, too many security group rules.") raise exc.HTTPBadRequest(explanation=msg) security_group_rule = db.security_group_rule_create(context, values) self.sgh.trigger_security_group_rule_create_refresh( context, [security_group_rule['id']]) self.compute_api.trigger_security_group_rules_refresh(context, security_group_id=security_group['id']) return {"security_group_rule": self._format_security_group_rule( context, security_group_rule)} def _security_group_rule_exists(self, security_group, values): """Indicates whether the specified rule values are already defined in the given security group. """ for rule in security_group.rules: is_duplicate = True keys = ('group_id', 'cidr', 'from_port', 'to_port', 'protocol') for key in keys: if rule.get(key) != values.get(key): is_duplicate = False break if is_duplicate: return True return False def _rule_args_to_dict(self, context, to_port=None, from_port=None, parent_group_id=None, ip_protocol=None, cidr=None, group_id=None): values = {} if group_id is not None: try: parent_group_id = int(parent_group_id) group_id = int(group_id) except ValueError: msg = _("Parent or group id is not integer") raise exception.InvalidInput(reason=msg) values['group_id'] = group_id #check if groupId exists db.security_group_get(context, group_id) elif cidr: # If this fails, it throws an exception. This is what we want. try: cidr = urllib.unquote(cidr).decode() except Exception: raise exception.InvalidCidr(cidr=cidr) if not utils.is_valid_cidr(cidr): # Raise exception for non-valid address raise exception.InvalidCidr(cidr=cidr) values['cidr'] = cidr else: values['cidr'] = '0.0.0.0/0' if group_id: # Open everything if an explicit port range or type/code are not # specified, but only if a source group was specified. 
ip_proto_upper = ip_protocol.upper() if ip_protocol else '' if (ip_proto_upper == 'ICMP' and from_port is None and to_port is None): from_port = -1 to_port = -1 elif (ip_proto_upper in ['TCP', 'UDP'] and from_port is None and to_port is None): from_port = 1 to_port = 65535 if ip_protocol and from_port is not None and to_port is not None: ip_protocol = str(ip_protocol) try: from_port = int(from_port) to_port = int(to_port) except ValueError: if ip_protocol.upper() == 'ICMP': raise exception.InvalidInput(reason="Type and" " Code must be integers for ICMP protocol type") else: raise exception.InvalidInput(reason="To and From ports " "must be integers") if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']: raise exception.InvalidIpProtocol(protocol=ip_protocol) # Verify that from_port must always be less than # or equal to to_port if (ip_protocol.upper() in ['TCP', 'UDP'] and from_port > to_port): raise exception.InvalidPortRange(from_port=from_port, to_port=to_port, msg="Former value cannot" " be greater than the latter") # Verify valid TCP, UDP port ranges if (ip_protocol.upper() in ['TCP', 'UDP'] and (from_port < 1 or to_port > 65535)): raise exception.InvalidPortRange(from_port=from_port, to_port=to_port, msg="Valid TCP ports should" " be between 1-65535") # Verify ICMP type and code if (ip_protocol.upper() == "ICMP" and (from_port < -1 or from_port > 255 or to_port < -1 or to_port > 255)): raise exception.InvalidPortRange(from_port=from_port, to_port=to_port, msg="For ICMP, the" " type:code must be valid") values['protocol'] = ip_protocol values['from_port'] = from_port values['to_port'] = to_port else: # If cidr based filtering, protocol and ports are mandatory if 'cidr' in values: return None return values def delete(self, req, id): context = req.environ['nova.context'] authorize(context) self.compute_api.ensure_default_security_group(context) try: id = int(id) rule = db.security_group_rule_get(context, id) except ValueError: msg = _("Rule id is not integer") raise exc.HTTPBadRequest(explanation=msg) except exception.NotFound: msg = _("Rule (%s) not found") % id raise exc.HTTPNotFound(explanation=msg) group_id = rule.parent_group_id self.compute_api.ensure_default_security_group(context) security_group = db.security_group_get(context, group_id) msg = _("Revoke security group ingress %s") LOG.audit(msg, security_group['name'], context=context) db.security_group_rule_destroy(context, rule['id']) self.sgh.trigger_security_group_rule_destroy_refresh( context, [rule['id']]) self.compute_api.trigger_security_group_rules_refresh(context, security_group_id=security_group['id']) return webob.Response(status_int=202) class ServerSecurityGroupController(SecurityGroupControllerBase): @wsgi.serializers(xml=SecurityGroupsTemplate) def index(self, req, server_id): """Returns a list of security groups for the given instance.""" context = req.environ['nova.context'] authorize(context) self.compute_api.ensure_default_security_group(context) try: instance = self.compute_api.get(context, server_id) groups = db.security_group_get_by_instance(context, instance['id']) except exception.ApiError, e: raise webob.exc.HTTPBadRequest(explanation=e.message) except exception.NotAuthorized, e: raise webob.exc.HTTPUnauthorized() result = [self._format_security_group(context, group) for group in groups] return {'security_groups': list(sorted(result, key=lambda k: (k['tenant_id'], k['name'])))} class SecurityGroupActionController(wsgi.Controller): def __init__(self, *args, **kwargs): super(SecurityGroupActionController,
self).__init__(*args, **kwargs) self.compute_api = compute.API() self.sgh = utils.import_object(FLAGS.security_group_handler) @wsgi.action('addSecurityGroup') def _addSecurityGroup(self, req, id, body): context = req.environ['nova.context'] authorize(context) try: body = body['addSecurityGroup'] group_name = body['name'] except TypeError: msg = _("Missing parameter dict") raise webob.exc.HTTPBadRequest(explanation=msg) except KeyError: msg = _("Security group not specified") raise webob.exc.HTTPBadRequest(explanation=msg) if not group_name or group_name.strip() == '': msg = _("Security group name cannot be empty") raise webob.exc.HTTPBadRequest(explanation=msg) try: instance = self.compute_api.get(context, id) self.compute_api.add_security_group(context, instance, group_name) self.sgh.trigger_instance_add_security_group_refresh( context, instance, group_name) except exception.SecurityGroupNotFound as exp: raise exc.HTTPNotFound(explanation=unicode(exp)) except exception.InstanceNotFound as exp: raise exc.HTTPNotFound(explanation=unicode(exp)) except exception.Invalid as exp: raise exc.HTTPBadRequest(explanation=unicode(exp)) return webob.Response(status_int=202) @wsgi.action('removeSecurityGroup') def _removeSecurityGroup(self, req, id, body): context = req.environ['nova.context'] authorize(context) try: body = body['removeSecurityGroup'] group_name = body['name'] except TypeError: msg = _("Missing parameter dict") raise webob.exc.HTTPBadRequest(explanation=msg) except KeyError: msg = _("Security group not specified") raise webob.exc.HTTPBadRequest(explanation=msg) if not group_name or group_name.strip() == '': msg = _("Security group name cannot be empty") raise webob.exc.HTTPBadRequest(explanation=msg) try: instance = self.compute_api.get(context, id) self.compute_api.remove_security_group(context, instance, group_name) self.sgh.trigger_instance_remove_security_group_refresh( context, instance, group_name) except exception.SecurityGroupNotFound as exp: raise exc.HTTPNotFound(explanation=unicode(exp)) except exception.InstanceNotFound as exp: raise exc.HTTPNotFound(explanation=unicode(exp)) except exception.Invalid as exp: raise exc.HTTPBadRequest(explanation=unicode(exp)) return webob.Response(status_int=202) class Security_groups(extensions.ExtensionDescriptor): """Security group support""" name = "SecurityGroups" alias = "security_groups" namespace = "http://docs.openstack.org/compute/ext/securitygroups/api/v1.1" updated = "2011-07-21T00:00:00+00:00" def get_controller_extensions(self): controller = SecurityGroupActionController() extension = extensions.ControllerExtension(self, 'servers', controller) return [extension] def get_resources(self): resources = [] res = extensions.ResourceExtension('os-security-groups', controller=SecurityGroupController()) resources.append(res) res = extensions.ResourceExtension('os-security-group-rules', controller=SecurityGroupRulesController()) resources.append(res) res = extensions.ResourceExtension( 'os-security-groups', controller=ServerSecurityGroupController(), parent=dict(member_name='server', collection_name='servers')) resources.append(res) return resources
./CrossVul/dataset_final_sorted/CWE-264/py/good_3634_2
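Both copies of the port-range validation above (in this extension and in the EC2 cloud controller below) enforce the same rules: the protocol must be TCP, UDP, or ICMP; TCP/UDP ports must lie in 1-65535 with from_port <= to_port; for ICMP the pair is a type:code, each in -1..255 where -1 means "all". A condensed sketch of those checks, substituting ValueError for nova's exception classes to stay self-contained:

def validate_port_range(ip_protocol, from_port, to_port):
    proto = str(ip_protocol).upper()
    if proto not in ('TCP', 'UDP', 'ICMP'):
        raise ValueError('invalid ip protocol: %s' % ip_protocol)
    from_port, to_port = int(from_port), int(to_port)
    if proto in ('TCP', 'UDP'):
        if from_port > to_port:
            raise ValueError('from_port cannot be greater than to_port')
        if from_port < 1 or to_port > 65535:
            raise ValueError('TCP/UDP ports must be within 1-65535')
    else:  # ICMP: the ports carry type and code, -1 meaning "all"
        if not (-1 <= from_port <= 255 and -1 <= to_port <= 255):
            raise ValueError('ICMP type:code must be within -1..255')
    return proto, from_port, to_port


print(validate_port_range('tcp', 22, 22))   # ('TCP', 22, 22)
print(validate_port_range('icmp', -1, -1))  # ('ICMP', -1, -1)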
crossvul-python_data_bad_3632_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Cloud Controller: Implementation of EC2 REST API calls, which are dispatched to other nodes via AMQP RPC. State is via distributed datastore. """ import base64 import re import time import urllib from nova.api.ec2 import ec2utils from nova.api.ec2 import inst_state from nova.api import validator from nova import block_device from nova import compute from nova.compute import instance_types from nova.compute import vm_states from nova import crypto from nova import db from nova import exception from nova import flags from nova.image import s3 from nova import log as logging from nova import network from nova import utils from nova import volume FLAGS = flags.FLAGS LOG = logging.getLogger(__name__) def validate_ec2_id(val): if not validator.validate_str()(val): raise exception.InvalidInstanceIDMalformed(val) try: ec2utils.ec2_id_to_id(val) except exception.InvalidEc2Id: raise exception.InvalidInstanceIDMalformed(val) def _gen_key(context, user_id, key_name): """Generate a key This is a module level method because it is slow and we need to defer it into a process pool.""" # NOTE(vish): generating key pair is slow so check for legal # creation before creating key_pair try: db.key_pair_get(context, user_id, key_name) raise exception.KeyPairExists(key_name=key_name) except exception.NotFound: pass private_key, public_key, fingerprint = crypto.generate_key_pair() key = {} key['user_id'] = user_id key['name'] = key_name key['public_key'] = public_key key['fingerprint'] = fingerprint db.key_pair_create(context, key) return {'private_key': private_key, 'fingerprint': fingerprint} # EC2 API can return the following values as documented in the EC2 API # http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ # ApiReference-ItemType-InstanceStateType.html # pending 0 | running 16 | shutting-down 32 | terminated 48 | stopping 64 | # stopped 80 _STATE_DESCRIPTION_MAP = { None: inst_state.PENDING, vm_states.ACTIVE: inst_state.RUNNING, vm_states.BUILDING: inst_state.PENDING, vm_states.REBUILDING: inst_state.PENDING, vm_states.DELETED: inst_state.TERMINATED, vm_states.SOFT_DELETE: inst_state.TERMINATED, vm_states.STOPPED: inst_state.STOPPED, vm_states.SHUTOFF: inst_state.SHUTOFF, vm_states.MIGRATING: inst_state.MIGRATE, vm_states.RESIZING: inst_state.RESIZE, vm_states.PAUSED: inst_state.PAUSE, vm_states.SUSPENDED: inst_state.SUSPEND, vm_states.RESCUED: inst_state.RESCUE, } def _state_description(vm_state, shutdown_terminate): """Map the vm state to the server status string""" if (vm_state == vm_states.SHUTOFF and not shutdown_terminate): name = inst_state.STOPPED else: name = _STATE_DESCRIPTION_MAP.get(vm_state, vm_state) return {'code': inst_state.name_to_code(name), 'name': name} def _parse_block_device_mapping(bdm): """Parse BlockDeviceMappingItemType into flat hash 
BlockDeviceMapping.<N>.DeviceName BlockDeviceMapping.<N>.Ebs.SnapshotId BlockDeviceMapping.<N>.Ebs.VolumeSize BlockDeviceMapping.<N>.Ebs.DeleteOnTermination BlockDeviceMapping.<N>.Ebs.NoDevice BlockDeviceMapping.<N>.VirtualName => remove .Ebs and allow volume id in SnapshotId """ ebs = bdm.pop('ebs', None) if ebs: ec2_id = ebs.pop('snapshot_id', None) if ec2_id: id = ec2utils.ec2_id_to_id(ec2_id) if ec2_id.startswith('snap-'): bdm['snapshot_id'] = id elif ec2_id.startswith('vol-'): bdm['volume_id'] = id ebs.setdefault('delete_on_termination', True) bdm.update(ebs) return bdm def _properties_get_mappings(properties): return block_device.mappings_prepend_dev(properties.get('mappings', [])) def _format_block_device_mapping(bdm): """Construct BlockDeviceMappingItemType {'device_name': '...', 'snapshot_id': , ...} => BlockDeviceMappingItemType """ keys = (('deviceName', 'device_name'), ('virtualName', 'virtual_name')) item = {} for name, k in keys: if k in bdm: item[name] = bdm[k] if bdm.get('no_device'): item['noDevice'] = True if ('snapshot_id' in bdm) or ('volume_id' in bdm): ebs_keys = (('snapshotId', 'snapshot_id'), ('snapshotId', 'volume_id'), # snapshotId is abused ('volumeSize', 'volume_size'), ('deleteOnTermination', 'delete_on_termination')) ebs = {} for name, k in ebs_keys: if k in bdm: if k == 'snapshot_id': ebs[name] = ec2utils.id_to_ec2_snap_id(bdm[k]) elif k == 'volume_id': ebs[name] = ec2utils.id_to_ec2_vol_id(bdm[k]) else: ebs[name] = bdm[k] assert 'snapshotId' in ebs item['ebs'] = ebs return item def _format_mappings(properties, result): """Format multiple BlockDeviceMappingItemType""" mappings = [{'virtualName': m['virtual'], 'deviceName': m['device']} for m in _properties_get_mappings(properties) if block_device.is_swap_or_ephemeral(m['virtual'])] block_device_mapping = [_format_block_device_mapping(bdm) for bdm in properties.get('block_device_mapping', [])] # NOTE(yamahata): overwrite mappings with block_device_mapping for bdm in block_device_mapping: for i in range(len(mappings)): if bdm['deviceName'] == mappings[i]['deviceName']: del mappings[i] break mappings.append(bdm) # NOTE(yamahata): trim ebs.no_device == true. Is this necessary? mappings = [bdm for bdm in mappings if not (bdm.get('noDevice', False))] if mappings: result['blockDeviceMapping'] = mappings class CloudController(object): """ CloudController provides the critical dispatch between inbound API calls through the endpoint and messages sent to the other nodes.
""" def __init__(self): self.image_service = s3.S3ImageService() self.network_api = network.API() self.volume_api = volume.API() self.compute_api = compute.API(network_api=self.network_api, volume_api=self.volume_api) self.sgh = utils.import_object(FLAGS.security_group_handler) def __str__(self): return 'CloudController' def _get_image_state(self, image): # NOTE(vish): fallback status if image_state isn't set state = image.get('status') if state == 'active': state = 'available' return image['properties'].get('image_state', state) def describe_availability_zones(self, context, **kwargs): if ('zone_name' in kwargs and 'verbose' in kwargs['zone_name'] and context.is_admin): return self._describe_availability_zones_verbose(context, **kwargs) else: return self._describe_availability_zones(context, **kwargs) def _describe_availability_zones(self, context, **kwargs): ctxt = context.elevated() enabled_services = db.service_get_all(ctxt, False) disabled_services = db.service_get_all(ctxt, True) available_zones = [] for zone in [service.availability_zone for service in enabled_services]: if not zone in available_zones: available_zones.append(zone) not_available_zones = [] for zone in [service.availability_zone for service in disabled_services if not service['availability_zone'] in available_zones]: if not zone in not_available_zones: not_available_zones.append(zone) result = [] for zone in available_zones: result.append({'zoneName': zone, 'zoneState': "available"}) for zone in not_available_zones: result.append({'zoneName': zone, 'zoneState': "not available"}) return {'availabilityZoneInfo': result} def _describe_availability_zones_verbose(self, context, **kwargs): rv = {'availabilityZoneInfo': [{'zoneName': 'nova', 'zoneState': 'available'}]} services = db.service_get_all(context, False) hosts = [] for host in [service['host'] for service in services]: if not host in hosts: hosts.append(host) for host in hosts: rv['availabilityZoneInfo'].append({'zoneName': '|- %s' % host, 'zoneState': ''}) hsvcs = [service for service in services if service['host'] == host] for svc in hsvcs: alive = utils.service_is_up(svc) art = (alive and ":-)") or "XXX" active = 'enabled' if svc['disabled']: active = 'disabled' rv['availabilityZoneInfo'].append({ 'zoneName': '| |- %s' % svc['binary'], 'zoneState': '%s %s %s' % (active, art, svc['updated_at'])}) return rv def describe_regions(self, context, region_name=None, **kwargs): if FLAGS.region_list: regions = [] for region in FLAGS.region_list: name, _sep, host = region.partition('=') endpoint = '%s://%s:%s%s' % (FLAGS.ec2_scheme, host, FLAGS.ec2_port, FLAGS.ec2_path) regions.append({'regionName': name, 'regionEndpoint': endpoint}) else: regions = [{'regionName': 'nova', 'regionEndpoint': '%s://%s:%s%s' % (FLAGS.ec2_scheme, FLAGS.ec2_host, FLAGS.ec2_port, FLAGS.ec2_path)}] return {'regionInfo': regions} def describe_snapshots(self, context, snapshot_id=None, owner=None, restorable_by=None, **kwargs): if snapshot_id: snapshots = [] for ec2_id in snapshot_id: internal_id = ec2utils.ec2_id_to_id(ec2_id) snapshot = self.volume_api.get_snapshot( context, snapshot_id=internal_id) snapshots.append(snapshot) else: snapshots = self.volume_api.get_all_snapshots(context) snapshots = [self._format_snapshot(context, s) for s in snapshots] return {'snapshotSet': snapshots} def _format_snapshot(self, context, snapshot): s = {} s['snapshotId'] = ec2utils.id_to_ec2_snap_id(snapshot['id']) s['volumeId'] = ec2utils.id_to_ec2_vol_id(snapshot['volume_id']) s['status'] = snapshot['status'] 
s['startTime'] = snapshot['created_at'] s['progress'] = snapshot['progress'] s['ownerId'] = snapshot['project_id'] s['volumeSize'] = snapshot['volume_size'] s['description'] = snapshot['display_description'] return s def create_snapshot(self, context, volume_id, **kwargs): validate_ec2_id(volume_id) LOG.audit(_("Create snapshot of volume %s"), volume_id, context=context) volume_id = ec2utils.ec2_id_to_id(volume_id) volume = self.volume_api.get(context, volume_id) snapshot = self.volume_api.create_snapshot( context, volume, None, kwargs.get('description')) return self._format_snapshot(context, snapshot) def delete_snapshot(self, context, snapshot_id, **kwargs): snapshot_id = ec2utils.ec2_id_to_id(snapshot_id) snapshot = self.volume_api.get_snapshot(context, snapshot_id) self.volume_api.delete_snapshot(context, snapshot) return True def describe_key_pairs(self, context, key_name=None, **kwargs): key_pairs = db.key_pair_get_all_by_user(context, context.user_id) if not key_name is None: key_pairs = [x for x in key_pairs if x['name'] in key_name] result = [] for key_pair in key_pairs: # filter out the vpn keys suffix = FLAGS.vpn_key_suffix if context.is_admin or not key_pair['name'].endswith(suffix): result.append({ 'keyName': key_pair['name'], 'keyFingerprint': key_pair['fingerprint'], }) return {'keySet': result} def create_key_pair(self, context, key_name, **kwargs): if not re.match('^[a-zA-Z0-9_\- ]+$', str(key_name)): err = _("Value (%s) for KeyName is invalid." " Content limited to Alphanumeric character, " "spaces, dashes, and underscore.") % key_name raise exception.EC2APIError(err) if len(str(key_name)) > 255: err = _("Value (%s) for Keyname is invalid." " Length exceeds maximum of 255.") % key_name raise exception.EC2APIError(err) LOG.audit(_("Create key pair %s"), key_name, context=context) data = _gen_key(context, context.user_id, key_name) return {'keyName': key_name, 'keyFingerprint': data['fingerprint'], 'keyMaterial': data['private_key']} # TODO(vish): when context is no longer an object, pass it here def import_key_pair(self, context, key_name, public_key_material, **kwargs): LOG.audit(_("Import key %s"), key_name, context=context) try: db.key_pair_get(context, context.user_id, key_name) raise exception.KeyPairExists(key_name=key_name) except exception.NotFound: pass public_key = base64.b64decode(public_key_material) fingerprint = crypto.generate_fingerprint(public_key) key = {} key['user_id'] = context.user_id key['name'] = key_name key['public_key'] = public_key key['fingerprint'] = fingerprint db.key_pair_create(context, key) return {'keyName': key_name, 'keyFingerprint': fingerprint} def delete_key_pair(self, context, key_name, **kwargs): LOG.audit(_("Delete key pair %s"), key_name, context=context) try: db.key_pair_destroy(context, context.user_id, key_name) except exception.NotFound: # aws returns true even if the key doesn't exist pass return True def describe_security_groups(self, context, group_name=None, group_id=None, **kwargs): self.compute_api.ensure_default_security_group(context) if group_name or group_id: groups = [] if group_name: for name in group_name: group = db.security_group_get_by_name(context, context.project_id, name) groups.append(group) if group_id: for gid in group_id: group = db.security_group_get(context, gid) groups.append(group) elif context.is_admin: groups = db.security_group_get_all(context) else: groups = db.security_group_get_by_project(context, context.project_id) groups = [self._format_security_group(context, g) for g in groups] return 
{'securityGroupInfo': list(sorted(groups, key=lambda k: (k['ownerId'], k['groupName'])))} def _format_security_group(self, context, group): g = {} g['groupDescription'] = group.description g['groupName'] = group.name g['ownerId'] = group.project_id g['ipPermissions'] = [] for rule in group.rules: r = {} r['groups'] = [] r['ipRanges'] = [] if rule.group_id: source_group = db.security_group_get(context, rule.group_id) r['groups'] += [{'groupName': source_group.name, 'userId': source_group.project_id}] if rule.protocol: r['ipProtocol'] = rule.protocol r['fromPort'] = rule.from_port r['toPort'] = rule.to_port g['ipPermissions'] += [dict(r)] else: for protocol, min_port, max_port in (('icmp', -1, -1), ('tcp', 1, 65535), ('udp', 1, 65535)): r['ipProtocol'] = protocol r['fromPort'] = min_port r['toPort'] = max_port g['ipPermissions'] += [dict(r)] else: r['ipProtocol'] = rule.protocol r['fromPort'] = rule.from_port r['toPort'] = rule.to_port r['ipRanges'] += [{'cidrIp': rule.cidr}] g['ipPermissions'] += [r] return g def _rule_args_to_dict(self, context, kwargs): rules = [] if not 'groups' in kwargs and not 'ip_ranges' in kwargs: rule = self._rule_dict_last_step(context, **kwargs) if rule: rules.append(rule) return rules if 'ip_ranges' in kwargs: rules = self._cidr_args_split(kwargs) else: rules = [kwargs] finalset = [] for rule in rules: if 'groups' in rule: groups_values = self._groups_args_split(rule) for groups_value in groups_values: final = self._rule_dict_last_step(context, **groups_value) finalset.append(final) else: final = self._rule_dict_last_step(context, **rule) finalset.append(final) return finalset def _cidr_args_split(self, kwargs): cidr_args_split = [] cidrs = kwargs['ip_ranges'] for key, cidr in cidrs.iteritems(): mykwargs = kwargs.copy() del mykwargs['ip_ranges'] mykwargs['cidr_ip'] = cidr['cidr_ip'] cidr_args_split.append(mykwargs) return cidr_args_split def _groups_args_split(self, kwargs): groups_args_split = [] groups = kwargs['groups'] for key, group in groups.iteritems(): mykwargs = kwargs.copy() del mykwargs['groups'] if 'group_name' in group: mykwargs['source_security_group_name'] = group['group_name'] if 'user_id' in group: mykwargs['source_security_group_owner_id'] = group['user_id'] if 'group_id' in group: mykwargs['source_security_group_id'] = group['group_id'] groups_args_split.append(mykwargs) return groups_args_split def _rule_dict_last_step(self, context, to_port=None, from_port=None, ip_protocol=None, cidr_ip=None, user_id=None, source_security_group_name=None, source_security_group_owner_id=None): values = {} if source_security_group_name: source_project_id = self._get_source_project_id(context, source_security_group_owner_id) source_security_group = db.security_group_get_by_name( context.elevated(), source_project_id, source_security_group_name) notfound = exception.SecurityGroupNotFound if not source_security_group: raise notfound(security_group_id=source_security_group_name) values['group_id'] = source_security_group['id'] elif cidr_ip: # If this fails, it throws an exception. This is what we want. cidr_ip = urllib.unquote(cidr_ip).decode() if not utils.is_valid_cidr(cidr_ip): # Raise exception for non-valid address raise exception.EC2APIError(_("Invalid CIDR")) values['cidr'] = cidr_ip else: values['cidr'] = '0.0.0.0/0' if source_security_group_name: # Open everything if an explicit port range or type/code are not # specified, but only if a source group was specified. 
ip_proto_upper = ip_protocol.upper() if ip_protocol else '' if (ip_proto_upper == 'ICMP' and from_port is None and to_port is None): from_port = -1 to_port = -1 elif (ip_proto_upper in ['TCP', 'UDP'] and from_port is None and to_port is None): from_port = 1 to_port = 65535 if ip_protocol and from_port is not None and to_port is not None: ip_protocol = str(ip_protocol) try: # Verify integer conversions from_port = int(from_port) to_port = int(to_port) except ValueError: if ip_protocol.upper() == 'ICMP': raise exception.InvalidInput(reason="Type and" " Code must be integers for ICMP protocol type") else: raise exception.InvalidInput(reason="To and From ports " "must be integers") if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']: raise exception.InvalidIpProtocol(protocol=ip_protocol) # Verify that from_port must always be less than # or equal to to_port if (ip_protocol.upper() in ['TCP', 'UDP'] and (from_port > to_port)): raise exception.InvalidPortRange(from_port=from_port, to_port=to_port, msg="Former value cannot" " be greater than the latter") # Verify valid TCP, UDP port ranges if (ip_protocol.upper() in ['TCP', 'UDP'] and (from_port < 1 or to_port > 65535)): raise exception.InvalidPortRange(from_port=from_port, to_port=to_port, msg="Valid TCP ports should" " be between 1-65535") # Verify ICMP type and code if (ip_protocol.upper() == "ICMP" and (from_port < -1 or from_port > 255 or to_port < -1 or to_port > 255)): raise exception.InvalidPortRange(from_port=from_port, to_port=to_port, msg="For ICMP, the" " type:code must be valid") values['protocol'] = ip_protocol values['from_port'] = from_port values['to_port'] = to_port else: # If cidr based filtering, protocol and ports are mandatory if 'cidr' in values: return None return values def _security_group_rule_exists(self, security_group, values): """Indicates whether the specified rule values are already defined in the given security group.
""" for rule in security_group.rules: is_duplicate = True keys = ('group_id', 'cidr', 'from_port', 'to_port', 'protocol') for key in keys: if rule.get(key) != values.get(key): is_duplicate = False break if is_duplicate: return rule['id'] return False def revoke_security_group_ingress(self, context, group_name=None, group_id=None, **kwargs): if not group_name and not group_id: err = _("Not enough parameters, need group_name or group_id") raise exception.EC2APIError(err) self.compute_api.ensure_default_security_group(context) notfound = exception.SecurityGroupNotFound if group_name: security_group = db.security_group_get_by_name(context, context.project_id, group_name) if not security_group: raise notfound(security_group_id=group_name) if group_id: security_group = db.security_group_get(context, group_id) if not security_group: raise notfound(security_group_id=group_id) msg = _("Revoke security group ingress %s") LOG.audit(msg, security_group['name'], context=context) prevalues = [] try: prevalues = kwargs['ip_permissions'] except KeyError: prevalues.append(kwargs) rule_id = None rule_ids = [] for values in prevalues: rulesvalues = self._rule_args_to_dict(context, values) if not rulesvalues: err = _("%s Not enough parameters to build a valid rule") raise exception.EC2APIError(err % rulesvalues) for values_for_rule in rulesvalues: values_for_rule['parent_group_id'] = security_group.id rule_id = self._security_group_rule_exists(security_group, values_for_rule) if rule_id: db.security_group_rule_destroy(context, rule_id) rule_ids.append(rule_id) if rule_id: # NOTE(vish): we removed a rule, so refresh self.compute_api.trigger_security_group_rules_refresh( context, security_group_id=security_group['id']) self.sgh.trigger_security_group_rule_destroy_refresh( context, rule_ids) return True raise exception.EC2APIError(_("No rule for the specified parameters.")) # TODO(soren): This has only been tested with Boto as the client. # Unfortunately, it seems Boto is using an old API # for these operations, so support for newer API versions # is sketchy. 
def authorize_security_group_ingress(self, context, group_name=None, group_id=None, **kwargs): if not group_name and not group_id: err = _("Not enough parameters, need group_name or group_id") raise exception.EC2APIError(err) self.compute_api.ensure_default_security_group(context) notfound = exception.SecurityGroupNotFound if group_name: security_group = db.security_group_get_by_name(context, context.project_id, group_name) if not security_group: raise notfound(security_group_id=group_name) if group_id: security_group = db.security_group_get(context, group_id) if not security_group: raise notfound(security_group_id=group_id) msg = _("Authorize security group ingress %s") LOG.audit(msg, security_group['name'], context=context) prevalues = [] try: prevalues = kwargs['ip_permissions'] except KeyError: prevalues.append(kwargs) postvalues = [] for values in prevalues: rulesvalues = self._rule_args_to_dict(context, values) if not rulesvalues: err = _("%s Not enough parameters to build a valid rule") raise exception.EC2APIError(err % rulesvalues) for values_for_rule in rulesvalues: values_for_rule['parent_group_id'] = security_group.id if self._security_group_rule_exists(security_group, values_for_rule): err = _('%s - This rule already exists in group') raise exception.EC2APIError(err % values_for_rule) postvalues.append(values_for_rule) rule_ids = [] for values_for_rule in postvalues: security_group_rule = db.security_group_rule_create( context, values_for_rule) rule_ids.append(security_group_rule['id']) if postvalues: self.compute_api.trigger_security_group_rules_refresh( context, security_group_id=security_group['id']) self.sgh.trigger_security_group_rule_create_refresh( context, rule_ids) return True raise exception.EC2APIError(_("No rule for the specified parameters.")) def _get_source_project_id(self, context, source_security_group_owner_id): if source_security_group_owner_id: # Parse user:project for source group. source_parts = source_security_group_owner_id.split(':') # If no project name specified, assume it's same as user name. # Since we're looking up by project name, the user name is not # used here. It's only read for EC2 API compatibility. if len(source_parts) == 2: source_project_id = source_parts[1] else: source_project_id = source_parts[0] else: source_project_id = context.project_id return source_project_id def create_security_group(self, context, group_name, group_description): if not re.match('^[a-zA-Z0-9_\- ]+$', str(group_name)): # Some validation to ensure that values match API spec. # - Alphanumeric characters, spaces, dashes, and underscores. # TODO(Daviey): LP: #813685 extend beyond group_name checking, and # probably create a param validator that can be used elsewhere. err = _("Value (%s) for parameter GroupName is invalid." " Content limited to Alphanumeric characters, " "spaces, dashes, and underscores.") % group_name # err not that of master ec2 implementation, as they fail to raise. raise exception.InvalidParameterValue(err=err) if len(str(group_name)) > 255: err = _("Value (%s) for parameter GroupName is invalid." 
" Length exceeds maximum of 255.") % group_name raise exception.InvalidParameterValue(err=err) LOG.audit(_("Create Security Group %s"), group_name, context=context) self.compute_api.ensure_default_security_group(context) if db.security_group_exists(context, context.project_id, group_name): msg = _('group %s already exists') raise exception.EC2APIError(msg % group_name) group = {'user_id': context.user_id, 'project_id': context.project_id, 'name': group_name, 'description': group_description} group_ref = db.security_group_create(context, group) self.sgh.trigger_security_group_create_refresh(context, group) return {'securityGroupSet': [self._format_security_group(context, group_ref)]} def delete_security_group(self, context, group_name=None, group_id=None, **kwargs): if not group_name and not group_id: err = _("Not enough parameters, need group_name or group_id") raise exception.EC2APIError(err) notfound = exception.SecurityGroupNotFound if group_name: security_group = db.security_group_get_by_name(context, context.project_id, group_name) if not security_group: raise notfound(security_group_id=group_name) elif group_id: security_group = db.security_group_get(context, group_id) if not security_group: raise notfound(security_group_id=group_id) if db.security_group_in_use(context, security_group.id): raise exception.InvalidGroup(reason="In Use") LOG.audit(_("Delete security group %s"), group_name, context=context) db.security_group_destroy(context, security_group.id) self.sgh.trigger_security_group_destroy_refresh(context, security_group.id) return True def get_console_output(self, context, instance_id, **kwargs): LOG.audit(_("Get console output for instance %s"), instance_id, context=context) # instance_id may be passed in as a list of instances if isinstance(instance_id, list): ec2_id = instance_id[0] else: ec2_id = instance_id validate_ec2_id(ec2_id) instance_id = ec2utils.ec2_id_to_id(ec2_id) instance = self.compute_api.get(context, instance_id) output = self.compute_api.get_console_output(context, instance) now = utils.utcnow() return {"InstanceId": ec2_id, "Timestamp": now, "output": base64.b64encode(output)} def describe_volumes(self, context, volume_id=None, **kwargs): if volume_id: volumes = [] for ec2_id in volume_id: validate_ec2_id(ec2_id) internal_id = ec2utils.ec2_id_to_id(ec2_id) volume = self.volume_api.get(context, internal_id) volumes.append(volume) else: volumes = self.volume_api.get_all(context) volumes = [self._format_volume(context, v) for v in volumes] return {'volumeSet': volumes} def _format_volume(self, context, volume): instance_ec2_id = None instance_data = None if volume.get('instance', None): instance_id = volume['instance']['id'] instance_ec2_id = ec2utils.id_to_ec2_id(instance_id) instance_data = '%s[%s]' % (instance_ec2_id, volume['instance']['host']) v = {} v['volumeId'] = ec2utils.id_to_ec2_vol_id(volume['id']) v['status'] = volume['status'] v['size'] = volume['size'] v['availabilityZone'] = volume['availability_zone'] v['createTime'] = volume['created_at'] if context.is_admin: v['status'] = '%s (%s, %s, %s, %s)' % ( volume['status'], volume['project_id'], volume['host'], instance_data, volume['mountpoint']) if volume['attach_status'] == 'attached': v['attachmentSet'] = [{'attachTime': volume['attach_time'], 'deleteOnTermination': False, 'device': volume['mountpoint'], 'instanceId': instance_ec2_id, 'status': 'attached', 'volumeId': v['volumeId']}] else: v['attachmentSet'] = [{}] if volume.get('snapshot_id') is not None: v['snapshotId'] = 
ec2utils.id_to_ec2_snap_id(volume['snapshot_id']) else: v['snapshotId'] = None return v def create_volume(self, context, **kwargs): size = kwargs.get('size') if kwargs.get('snapshot_id') is not None: snapshot_id = ec2utils.ec2_id_to_id(kwargs['snapshot_id']) snapshot = self.volume_api.get_snapshot(context, snapshot_id) LOG.audit(_("Create volume from snapshot %s"), snapshot_id, context=context) else: snapshot = None LOG.audit(_("Create volume of %s GB"), size, context=context) availability_zone = kwargs.get('availability_zone', None) volume = self.volume_api.create(context, size, None, None, snapshot, availability_zone=availability_zone) # TODO(vish): Instance should be None at db layer instead of # trying to lazy load, but for now we turn it into # a dict to avoid an error. return self._format_volume(context, dict(volume)) def delete_volume(self, context, volume_id, **kwargs): validate_ec2_id(volume_id) volume_id = ec2utils.ec2_id_to_id(volume_id) try: volume = self.volume_api.get(context, volume_id) self.volume_api.delete(context, volume) except exception.InvalidVolume: raise exception.EC2APIError(_('Delete Failed')) return True def attach_volume(self, context, volume_id, instance_id, device, **kwargs): validate_ec2_id(instance_id) validate_ec2_id(volume_id) volume_id = ec2utils.ec2_id_to_id(volume_id) instance_id = ec2utils.ec2_id_to_id(instance_id) instance = self.compute_api.get(context, instance_id) msg = _("Attach volume %(volume_id)s to instance %(instance_id)s" " at %(device)s") % locals() LOG.audit(msg, context=context) try: self.compute_api.attach_volume(context, instance, volume_id, device) except exception.InvalidVolume: raise exception.EC2APIError(_('Attach Failed.')) volume = self.volume_api.get(context, volume_id) return {'attachTime': volume['attach_time'], 'device': volume['mountpoint'], 'instanceId': ec2utils.id_to_ec2_id(instance_id), 'requestId': context.request_id, 'status': volume['attach_status'], 'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)} def detach_volume(self, context, volume_id, **kwargs): validate_ec2_id(volume_id) volume_id = ec2utils.ec2_id_to_id(volume_id) LOG.audit(_("Detach volume %s"), volume_id, context=context) volume = self.volume_api.get(context, volume_id) try: instance = self.compute_api.detach_volume(context, volume_id=volume_id) except exception.InvalidVolume: raise exception.EC2APIError(_('Detach Volume Failed.')) return {'attachTime': volume['attach_time'], 'device': volume['mountpoint'], 'instanceId': ec2utils.id_to_ec2_id(instance['id']), 'requestId': context.request_id, 'status': volume['attach_status'], 'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)} def _format_kernel_id(self, context, instance_ref, result, key): kernel_uuid = instance_ref['kernel_id'] if kernel_uuid is None or kernel_uuid == '': return result[key] = ec2utils.glance_id_to_ec2_id(context, kernel_uuid, 'aki') def _format_ramdisk_id(self, context, instance_ref, result, key): ramdisk_uuid = instance_ref['ramdisk_id'] if ramdisk_uuid is None or ramdisk_uuid == '': return result[key] = ec2utils.glance_id_to_ec2_id(context, ramdisk_uuid, 'ari') def describe_instance_attribute(self, context, instance_id, attribute, **kwargs): def _unsupported_attribute(instance, result): raise exception.EC2APIError(_('attribute not supported: %s') % attribute) def _format_attr_block_device_mapping(instance, result): tmp = {} self._format_instance_root_device_name(instance, tmp) self._format_instance_bdm(context, instance_id, tmp['rootDeviceName'], result) def 
_format_attr_disable_api_termination(instance, result): result['disableApiTermination'] = instance['disable_terminate'] def _format_attr_group_set(instance, result): CloudController._format_group_set(instance, result) def _format_attr_instance_initiated_shutdown_behavior(instance, result): if instance['shutdown_terminate']: result['instanceInitiatedShutdownBehavior'] = 'terminate' else: result['instanceInitiatedShutdownBehavior'] = 'stop' def _format_attr_instance_type(instance, result): self._format_instance_type(instance, result) def _format_attr_kernel(instance, result): self._format_kernel_id(context, instance, result, 'kernel') def _format_attr_ramdisk(instance, result): self._format_ramdisk_id(context, instance, result, 'ramdisk') def _format_attr_root_device_name(instance, result): self._format_instance_root_device_name(instance, result) def _format_attr_source_dest_check(instance, result): _unsupported_attribute(instance, result) def _format_attr_user_data(instance, result): result['userData'] = base64.b64decode(instance['user_data']) attribute_formatter = { 'blockDeviceMapping': _format_attr_block_device_mapping, 'disableApiTermination': _format_attr_disable_api_termination, 'groupSet': _format_attr_group_set, 'instanceInitiatedShutdownBehavior': _format_attr_instance_initiated_shutdown_behavior, 'instanceType': _format_attr_instance_type, 'kernel': _format_attr_kernel, 'ramdisk': _format_attr_ramdisk, 'rootDeviceName': _format_attr_root_device_name, 'sourceDestCheck': _format_attr_source_dest_check, 'userData': _format_attr_user_data, } fn = attribute_formatter.get(attribute) if fn is None: raise exception.EC2APIError( _('attribute not supported: %s') % attribute) ec2_instance_id = instance_id validate_ec2_id(instance_id) instance_id = ec2utils.ec2_id_to_id(ec2_instance_id) instance = self.compute_api.get(context, instance_id) result = {'instance_id': ec2_instance_id} fn(instance, result) return result def describe_instances(self, context, **kwargs): # Optional DescribeInstances argument instance_id = kwargs.get('instance_id', None) return self._format_describe_instances(context, instance_id=instance_id) def describe_instances_v6(self, context, **kwargs): # Optional DescribeInstancesV6 argument instance_id = kwargs.get('instance_id', None) return self._format_describe_instances(context, instance_id=instance_id, use_v6=True) def _format_describe_instances(self, context, **kwargs): return {'reservationSet': self._format_instances(context, **kwargs)} def _format_run_instances(self, context, reservation_id): i = self._format_instances(context, reservation_id=reservation_id) assert len(i) == 1 return i[0] def _format_terminate_instances(self, context, instance_id, previous_states): instances_set = [] for (ec2_id, previous_state) in zip(instance_id, previous_states): i = {} i['instanceId'] = ec2_id i['previousState'] = _state_description(previous_state['vm_state'], previous_state['shutdown_terminate']) try: internal_id = ec2utils.ec2_id_to_id(ec2_id) instance = self.compute_api.get(context, internal_id) i['shutdownState'] = _state_description(instance['vm_state'], instance['shutdown_terminate']) except exception.NotFound: i['shutdownState'] = _state_description(vm_states.DELETED, True) instances_set.append(i) return {'instancesSet': instances_set} def _format_instance_bdm(self, context, instance_id, root_device_name, result): """Format InstanceBlockDeviceMappingResponseItemType""" root_device_type = 'instance-store' mapping = [] for bdm in 
db.block_device_mapping_get_all_by_instance(context, instance_id): volume_id = bdm['volume_id'] if (volume_id is None or bdm['no_device']): continue if (bdm['device_name'] == root_device_name and (bdm['snapshot_id'] or bdm['volume_id'])): assert not bdm['virtual_name'] root_device_type = 'ebs' vol = self.volume_api.get(context, volume_id) LOG.debug(_("vol = %s\n"), vol) # TODO(yamahata): volume attach time ebs = {'volumeId': volume_id, 'deleteOnTermination': bdm['delete_on_termination'], 'attachTime': vol['attach_time'] or '-', 'status': vol['status'], } res = {'deviceName': bdm['device_name'], 'ebs': ebs, } mapping.append(res) if mapping: result['blockDeviceMapping'] = mapping result['rootDeviceType'] = root_device_type @staticmethod def _format_instance_root_device_name(instance, result): result['rootDeviceName'] = (instance.get('root_device_name') or block_device.DEFAULT_ROOT_DEV_NAME) @staticmethod def _format_instance_type(instance, result): if instance['instance_type']: result['instanceType'] = instance['instance_type'].get('name') else: result['instanceType'] = None @staticmethod def _format_group_set(instance, result): security_group_names = [] if instance.get('security_groups'): for security_group in instance['security_groups']: security_group_names.append(security_group['name']) result['groupSet'] = utils.convert_to_list_dict( security_group_names, 'groupId') def _format_instances(self, context, instance_id=None, use_v6=False, **search_opts): # TODO(termie): this method is poorly named as its name does not imply # that it will be making a variety of database calls # rather than simply formatting a bunch of instances that # were handed to it reservations = {} # NOTE(vish): instance_id is an optional list of ids to filter by if instance_id: instances = [] for ec2_id in instance_id: internal_id = ec2utils.ec2_id_to_id(ec2_id) try: instance = self.compute_api.get(context, internal_id) except exception.NotFound: continue instances.append(instance) else: try: # always filter out deleted instances search_opts['deleted'] = False instances = self.compute_api.get_all(context, search_opts=search_opts, sort_dir='asc') except exception.NotFound: instances = [] for instance in instances: if not context.is_admin: if instance['image_ref'] == str(FLAGS.vpn_image_id): continue i = {} instance_id = instance['id'] ec2_id = ec2utils.id_to_ec2_id(instance_id) i['instanceId'] = ec2_id image_uuid = instance['image_ref'] i['imageId'] = ec2utils.glance_id_to_ec2_id(context, image_uuid) self._format_kernel_id(context, instance, i, 'kernelId') self._format_ramdisk_id(context, instance, i, 'ramdiskId') i['instanceState'] = _state_description( instance['vm_state'], instance['shutdown_terminate']) fixed_ip = None floating_ip = None ip_info = ec2utils.get_ip_info_for_instance(context, instance) if ip_info['fixed_ips']: fixed_ip = ip_info['fixed_ips'][0] if ip_info['floating_ips']: floating_ip = ip_info['floating_ips'][0] if ip_info['fixed_ip6s']: i['dnsNameV6'] = ip_info['fixed_ip6s'][0] if FLAGS.ec2_private_dns_show_ip: i['privateDnsName'] = fixed_ip else: i['privateDnsName'] = instance['hostname'] i['privateIpAddress'] = fixed_ip i['publicDnsName'] = floating_ip i['ipAddress'] = floating_ip or fixed_ip i['dnsName'] = i['publicDnsName'] or i['privateDnsName'] i['keyName'] = instance['key_name'] if context.is_admin: i['keyName'] = '%s (%s, %s)' % (i['keyName'], instance['project_id'], instance['host']) i['productCodesSet'] = utils.convert_to_list_dict([], 'product_codes') self._format_instance_type(instance, i) 
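            # NOTE(editor): illustrative sketch (not part of the original
            # module) of the EC2 id scheme the ec2utils helpers used above
            # are assumed to implement -- internal integer ids round-trip
            # through zero-padded hex strings:
            #
            #     def id_to_ec2_id(instance_id, template='i-%08x'):
            #         return template % int(instance_id)
            #
            #     def ec2_id_to_id(ec2_id):   # 'i-0000001e' -> 30
            #         return int(ec2_id.split('-')[-1], 16)
            #
            # The real implementations live in nova.api.ec2.ec2utils and
            # additionally validate the id format.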
i['launchTime'] = instance['created_at'] i['amiLaunchIndex'] = instance['launch_index'] self._format_instance_root_device_name(instance, i) self._format_instance_bdm(context, instance_id, i['rootDeviceName'], i) host = instance['host'] services = db.service_get_all_by_host(context.elevated(), host) zone = ec2utils.get_availability_zone_by_host(services, host) i['placement'] = {'availabilityZone': zone} if instance['reservation_id'] not in reservations: r = {} r['reservationId'] = instance['reservation_id'] r['ownerId'] = instance['project_id'] self._format_group_set(instance, r) r['instancesSet'] = [] reservations[instance['reservation_id']] = r reservations[instance['reservation_id']]['instancesSet'].append(i) return list(reservations.values()) def describe_addresses(self, context, **kwargs): return self.format_addresses(context) def format_addresses(self, context): addresses = [] floaters = self.network_api.get_floating_ips_by_project(context) for floating_ip_ref in floaters: if floating_ip_ref['project_id'] is None: continue address = floating_ip_ref['address'] ec2_id = None if floating_ip_ref['fixed_ip_id']: fixed_id = floating_ip_ref['fixed_ip_id'] fixed = self.network_api.get_fixed_ip(context, fixed_id) if fixed['instance_id'] is not None: ec2_id = ec2utils.id_to_ec2_id(fixed['instance_id']) address_rv = {'public_ip': address, 'instance_id': ec2_id} if context.is_admin: details = "%s (%s)" % (address_rv['instance_id'], floating_ip_ref['project_id']) address_rv['instance_id'] = details addresses.append(address_rv) return {'addressesSet': addresses} def allocate_address(self, context, **kwargs): LOG.audit(_("Allocate address"), context=context) public_ip = self.network_api.allocate_floating_ip(context) return {'publicIp': public_ip} def release_address(self, context, public_ip, **kwargs): LOG.audit(_("Release address %s"), public_ip, context=context) self.network_api.release_floating_ip(context, address=public_ip) return {'return': "true"} def associate_address(self, context, instance_id, public_ip, **kwargs): LOG.audit(_("Associate address %(public_ip)s to" " instance %(instance_id)s") % locals(), context=context) instance_id = ec2utils.ec2_id_to_id(instance_id) instance = self.compute_api.get(context, instance_id) self.compute_api.associate_floating_ip(context, instance, address=public_ip) return {'return': "true"} def disassociate_address(self, context, public_ip, **kwargs): LOG.audit(_("Disassociate address %s"), public_ip, context=context) self.network_api.disassociate_floating_ip(context, address=public_ip) return {'return': "true"} def run_instances(self, context, **kwargs): max_count = int(kwargs.get('max_count', 1)) if kwargs.get('kernel_id'): kernel = self._get_image(context, kwargs['kernel_id']) kwargs['kernel_id'] = ec2utils.id_to_glance_id(context, kernel['id']) if kwargs.get('ramdisk_id'): ramdisk = self._get_image(context, kwargs['ramdisk_id']) kwargs['ramdisk_id'] = ec2utils.id_to_glance_id(context, ramdisk['id']) for bdm in kwargs.get('block_device_mapping', []): _parse_block_device_mapping(bdm) image = self._get_image(context, kwargs['image_id']) image_uuid = ec2utils.id_to_glance_id(context, image['id']) if image: image_state = self._get_image_state(image) else: raise exception.ImageNotFound(image_id=kwargs['image_id']) if image_state != 'available': raise exception.EC2APIError(_('Image must be available')) (instances, resv_id) = self.compute_api.create(context, instance_type=instance_types.get_instance_type_by_name( kwargs.get('instance_type', None)), 
            image_href=image_uuid,
            min_count=int(kwargs.get('min_count', max_count)),
            max_count=max_count,
            kernel_id=kwargs.get('kernel_id'),
            ramdisk_id=kwargs.get('ramdisk_id'),
            key_name=kwargs.get('key_name'),
            user_data=kwargs.get('user_data'),
            security_group=kwargs.get('security_group'),
            availability_zone=kwargs.get('placement', {}).get(
                'availability_zone'),
            block_device_mapping=kwargs.get('block_device_mapping', {}))
        return self._format_run_instances(context, resv_id)

    def terminate_instances(self, context, instance_id, **kwargs):
        """Terminate each instance in instance_id, which is a list of ec2 ids.
        instance_id is a kwarg so its name cannot be modified."""
        LOG.debug(_("Going to start terminating instances"))
        previous_states = []
        for ec2_id in instance_id:
            validate_ec2_id(ec2_id)
            _instance_id = ec2utils.ec2_id_to_id(ec2_id)
            instance = self.compute_api.get(context, _instance_id)
            previous_states.append(instance)
            self.compute_api.delete(context, instance)
        return self._format_terminate_instances(context,
                                                instance_id,
                                                previous_states)

    def reboot_instances(self, context, instance_id, **kwargs):
        """instance_id is a list of instance ids"""
        LOG.audit(_("Reboot instance %r"), instance_id, context=context)
        for ec2_id in instance_id:
            validate_ec2_id(ec2_id)
            _instance_id = ec2utils.ec2_id_to_id(ec2_id)
            instance = self.compute_api.get(context, _instance_id)
            self.compute_api.reboot(context, instance, 'HARD')
        return True

    def stop_instances(self, context, instance_id, **kwargs):
        """Stop each instance in instance_id.
        Here instance_id is a list of instance ids"""
        LOG.debug(_("Going to stop instances"))
        for ec2_id in instance_id:
            validate_ec2_id(ec2_id)
            _instance_id = ec2utils.ec2_id_to_id(ec2_id)
            instance = self.compute_api.get(context, _instance_id)
            self.compute_api.stop(context, instance)
        return True

    def start_instances(self, context, instance_id, **kwargs):
        """Start each instance in instance_id.
Here instance_id is a list of instance ids""" LOG.debug(_("Going to start instances")) for ec2_id in instance_id: validate_ec2_id(ec2_id) _instance_id = ec2utils.ec2_id_to_id(ec2_id) instance = self.compute_api.get(context, _instance_id) self.compute_api.start(context, instance) return True def _get_image(self, context, ec2_id): try: internal_id = ec2utils.ec2_id_to_id(ec2_id) image = self.image_service.show(context, internal_id) except (exception.InvalidEc2Id, exception.ImageNotFound): try: return self.image_service.show_by_name(context, ec2_id) except exception.NotFound: raise exception.ImageNotFound(image_id=ec2_id) image_type = ec2_id.split('-')[0] if ec2utils.image_type(image.get('container_format')) != image_type: raise exception.ImageNotFound(image_id=ec2_id) return image def _format_image(self, image): """Convert from format defined by GlanceImageService to S3 format.""" i = {} image_type = ec2utils.image_type(image.get('container_format')) ec2_id = ec2utils.image_ec2_id(image.get('id'), image_type) name = image.get('name') i['imageId'] = ec2_id kernel_id = image['properties'].get('kernel_id') if kernel_id: i['kernelId'] = ec2utils.image_ec2_id(kernel_id, 'aki') ramdisk_id = image['properties'].get('ramdisk_id') if ramdisk_id: i['ramdiskId'] = ec2utils.image_ec2_id(ramdisk_id, 'ari') i['imageOwnerId'] = image.get('owner') img_loc = image['properties'].get('image_location') if img_loc: i['imageLocation'] = img_loc else: i['imageLocation'] = "%s (%s)" % (img_loc, name) i['name'] = name if not name and img_loc: # This should only occur for images registered with ec2 api # prior to that api populating the glance name i['name'] = img_loc i['imageState'] = self._get_image_state(image) i['description'] = image.get('description') display_mapping = {'aki': 'kernel', 'ari': 'ramdisk', 'ami': 'machine'} i['imageType'] = display_mapping.get(image_type) i['isPublic'] = not not image.get('is_public') i['architecture'] = image['properties'].get('architecture') properties = image['properties'] root_device_name = block_device.properties_root_device_name(properties) root_device_type = 'instance-store' for bdm in properties.get('block_device_mapping', []): if (bdm.get('device_name') == root_device_name and ('snapshot_id' in bdm or 'volume_id' in bdm) and not bdm.get('no_device')): root_device_type = 'ebs' i['rootDeviceName'] = (root_device_name or block_device.DEFAULT_ROOT_DEV_NAME) i['rootDeviceType'] = root_device_type _format_mappings(properties, i) return i def describe_images(self, context, image_id=None, **kwargs): # NOTE: image_id is a list! 
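        # NOTE(editor): for illustration only -- a sketch of how the
        # ec2utils helpers used by _format_image() above are assumed to
        # derive EC2 image ids; the actual implementation lives in
        # nova.api.ec2.ec2utils:
        #
        #     def image_ec2_id(image_id, image_type='ami'):
        #         return '%s-%08x' % (image_type, int(image_id))
        #
        #     image_ec2_id(42)          # -> 'ami-0000002a'
        #     image_ec2_id(42, 'aki')   # -> 'aki-0000002a'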
if image_id: images = [] for ec2_id in image_id: try: image = self._get_image(context, ec2_id) except exception.NotFound: raise exception.ImageNotFound(image_id=ec2_id) images.append(image) else: images = self.image_service.detail(context) images = [self._format_image(i) for i in images] return {'imagesSet': images} def deregister_image(self, context, image_id, **kwargs): LOG.audit(_("De-registering image %s"), image_id, context=context) image = self._get_image(context, image_id) internal_id = image['id'] self.image_service.delete(context, internal_id) return {'imageId': image_id} def _register_image(self, context, metadata): image = self.image_service.create(context, metadata) image_type = ec2utils.image_type(image.get('container_format')) image_id = ec2utils.image_ec2_id(image['id'], image_type) return image_id def register_image(self, context, image_location=None, **kwargs): if image_location is None and kwargs.get('name'): image_location = kwargs['name'] if image_location is None: raise exception.EC2APIError(_('imageLocation is required')) metadata = {'properties': {'image_location': image_location}} if kwargs.get('name'): metadata['name'] = kwargs['name'] else: metadata['name'] = image_location if 'root_device_name' in kwargs: metadata['properties']['root_device_name'] = kwargs.get( 'root_device_name') mappings = [_parse_block_device_mapping(bdm) for bdm in kwargs.get('block_device_mapping', [])] if mappings: metadata['properties']['block_device_mapping'] = mappings image_id = self._register_image(context, metadata) msg = _("Registered image %(image_location)s with" " id %(image_id)s") % locals() LOG.audit(msg, context=context) return {'imageId': image_id} def describe_image_attribute(self, context, image_id, attribute, **kwargs): def _block_device_mapping_attribute(image, result): _format_mappings(image['properties'], result) def _launch_permission_attribute(image, result): result['launchPermission'] = [] if image['is_public']: result['launchPermission'].append({'group': 'all'}) def _root_device_name_attribute(image, result): _prop_root_dev_name = block_device.properties_root_device_name result['rootDeviceName'] = _prop_root_dev_name(image['properties']) if result['rootDeviceName'] is None: result['rootDeviceName'] = block_device.DEFAULT_ROOT_DEV_NAME supported_attributes = { 'blockDeviceMapping': _block_device_mapping_attribute, 'launchPermission': _launch_permission_attribute, 'rootDeviceName': _root_device_name_attribute, } fn = supported_attributes.get(attribute) if fn is None: raise exception.EC2APIError(_('attribute not supported: %s') % attribute) try: image = self._get_image(context, image_id) except exception.NotFound: raise exception.ImageNotFound(image_id=image_id) result = {'imageId': image_id} fn(image, result) return result def modify_image_attribute(self, context, image_id, attribute, operation_type, **kwargs): # TODO(devcamcar): Support users and groups other than 'all'. 
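        # NOTE(editor): describe_image_attribute() above and
        # describe_instance_attribute() earlier share the same dispatch
        # pattern -- a dict maps the requested attribute name to a
        # formatter callable, and an unknown key yields a uniform error:
        #
        #     handlers = {'rootDeviceName': _root_device_name_attribute}
        #     fn = handlers.get(attribute)
        #     if fn is None:
        #         raise exception.EC2APIError(
        #             _('attribute not supported: %s') % attribute)
        #     fn(image, result)
        #
        # This keeps the set of supported attributes in one table instead
        # of a chain of if/elif branches.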
        if attribute != 'launchPermission':
            raise exception.EC2APIError(_('attribute not supported: %s')
                                        % attribute)
        if not 'user_group' in kwargs:
            raise exception.EC2APIError(_('user or group not specified'))
        if len(kwargs['user_group']) != 1 and kwargs['user_group'][0] != 'all':
            raise exception.EC2APIError(_('only group "all" is supported'))
        if not operation_type in ['add', 'remove']:
            msg = _('operation_type must be add or remove')
            raise exception.EC2APIError(msg)
        LOG.audit(_("Updating image %s publicity"), image_id, context=context)

        try:
            image = self._get_image(context, image_id)
        except exception.NotFound:
            raise exception.ImageNotFound(image_id=image_id)
        internal_id = image['id']
        del image['id']

        image['is_public'] = (operation_type == 'add')
        try:
            return self.image_service.update(context, internal_id, image)
        except exception.ImageNotAuthorized:
            msg = _('Not allowed to modify attributes for image %s')
            raise exception.EC2APIError(msg % image_id)

    def update_image(self, context, image_id, **kwargs):
        internal_id = ec2utils.ec2_id_to_id(image_id)
        result = self.image_service.update(context, internal_id,
                                           dict(kwargs))
        return result

    # TODO(yamahata): race condition
    # At the moment there is no way to prevent others from
    # manipulating instances/volumes/snapshots.
    # As other code doesn't take it into consideration, here we don't
    # care of it for now. Ostrich algorithm
    def create_image(self, context, instance_id, **kwargs):
        # NOTE(yamahata): name/description are ignored by register_image(),
        #                 so handle them here
        no_reboot = kwargs.get('no_reboot', False)

        validate_ec2_id(instance_id)
        ec2_instance_id = instance_id
        instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
        instance = self.compute_api.get(context, instance_id)

        # stop the instance if necessary
        restart_instance = False
        if not no_reboot:
            vm_state = instance['vm_state']

            # if the instance is in a subtle state, refuse to proceed.
            if vm_state not in (vm_states.ACTIVE, vm_states.SHUTOFF,
                                vm_states.STOPPED):
                raise exception.InstanceNotRunning(
                    instance_id=ec2_instance_id)

            if vm_state in (vm_states.ACTIVE, vm_states.SHUTOFF):
                restart_instance = True
                self.compute_api.stop(context, instance)

            # wait for the instance to be really stopped
            start_time = time.time()
            while vm_state != vm_states.STOPPED:
                time.sleep(1)
                instance = self.compute_api.get(context, instance_id)
                vm_state = instance['vm_state']
                # NOTE(yamahata): timeout and error. 1 hour for now for
                #                 safety. Is it too short/long?
                #                 Or is there any better way?
                timeout = 1 * 60 * 60
                if time.time() > start_time + timeout:
                    raise exception.EC2APIError(
                        _('Couldn\'t stop instance within %d sec') % timeout)

        src_image = self._get_image(context, instance['image_ref'])
        properties = src_image['properties']
        if instance['root_device_name']:
            properties['root_device_name'] = instance['root_device_name']

        mapping = []
        bdms = db.block_device_mapping_get_all_by_instance(context,
                                                           instance_id)
        for bdm in bdms:
            if bdm.no_device:
                continue
            m = {}
            for attr in ('device_name', 'snapshot_id', 'volume_id',
                         'volume_size', 'delete_on_termination',
                         'no_device', 'virtual_name'):
                val = getattr(bdm, attr)
                if val is not None:
                    m[attr] = val

            volume_id = m.get('volume_id')
            if m.get('snapshot_id') and volume_id:
                # create snapshot based on volume_id
                volume = self.volume_api.get(context, volume_id)
                # NOTE(yamahata): Should we wait for snapshot creation?
                #                 Linux LVM snapshot creation completes in
                #                 short time, it doesn't matter for now.
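                # NOTE(editor): illustrative example (not from the original
                # source) of a mapping entry after the snapshot swap below,
                # with hypothetical values:
                #
                #     {'device_name': '/dev/vdb',
                #      'snapshot_id': 42,
                #      'delete_on_termination': True}
                #
                # volume_id is replaced by the id of the freshly created
                # snapshot so the registered image can recreate the volume.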
snapshot = self.volume_api.create_snapshot_force( context, volume, volume['display_name'], volume['display_description']) m['snapshot_id'] = snapshot['id'] del m['volume_id'] if m: mapping.append(m) for m in _properties_get_mappings(properties): virtual_name = m['virtual'] if virtual_name in ('ami', 'root'): continue assert block_device.is_swap_or_ephemeral(virtual_name) device_name = m['device'] if device_name in [b['device_name'] for b in mapping if not b.get('no_device', False)]: continue # NOTE(yamahata): swap and ephemeral devices are specified in # AMI, but disabled for this instance by user. # So disable those device by no_device. mapping.append({'device_name': device_name, 'no_device': True}) if mapping: properties['block_device_mapping'] = mapping for attr in ('status', 'location', 'id'): src_image.pop(attr, None) image_id = self._register_image(context, src_image) if restart_instance: self.compute_api.start(context, instance_id=instance_id) return {'imageId': image_id}
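

# NOTE(editor): standalone sketch, not used by the module above -- it
# restates the poll-with-deadline pattern create_image() relies on while
# waiting for an instance to stop, with the one-hour timeout written out
# explicitly. 'getter' and 'wanted' are hypothetical names.
def _demo_wait_for_state(getter, wanted, timeout=1 * 60 * 60, interval=1):
    deadline = time.time() + timeout
    while getter() != wanted:
        if time.time() > deadline:
            raise exception.EC2APIError(
                _('Timed out waiting for state %s') % wanted)
        time.sleep(interval)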
./CrossVul/dataset_final_sorted/CWE-264/py/bad_3632_0
crossvul-python_data_good_3632_4
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Quotas for instances, volumes, and floating ips.""" from nova import db from nova.openstack.common import cfg from nova import flags quota_opts = [ cfg.IntOpt('quota_instances', default=10, help='number of instances allowed per project'), cfg.IntOpt('quota_cores', default=20, help='number of instance cores allowed per project'), cfg.IntOpt('quota_ram', default=50 * 1024, help='megabytes of instance ram allowed per project'), cfg.IntOpt('quota_volumes', default=10, help='number of volumes allowed per project'), cfg.IntOpt('quota_gigabytes', default=1000, help='number of volume gigabytes allowed per project'), cfg.IntOpt('quota_floating_ips', default=10, help='number of floating ips allowed per project'), cfg.IntOpt('quota_metadata_items', default=128, help='number of metadata items allowed per instance'), cfg.IntOpt('quota_injected_files', default=5, help='number of injected files allowed'), cfg.IntOpt('quota_injected_file_content_bytes', default=10 * 1024, help='number of bytes allowed per injected file'), cfg.IntOpt('quota_injected_file_path_bytes', default=255, help='number of bytes allowed per injected file path'), cfg.IntOpt('quota_security_groups', default=10, help='number of security groups per project'), cfg.IntOpt('quota_security_group_rules', default=20, help='number of security rules per security group'), ] FLAGS = flags.FLAGS FLAGS.register_opts(quota_opts) quota_resources = ['metadata_items', 'injected_file_content_bytes', 'volumes', 'gigabytes', 'ram', 'floating_ips', 'instances', 'injected_files', 'cores', 'security_groups', 'security_group_rules'] def _get_default_quotas(): defaults = { 'instances': FLAGS.quota_instances, 'cores': FLAGS.quota_cores, 'ram': FLAGS.quota_ram, 'volumes': FLAGS.quota_volumes, 'gigabytes': FLAGS.quota_gigabytes, 'floating_ips': FLAGS.quota_floating_ips, 'metadata_items': FLAGS.quota_metadata_items, 'injected_files': FLAGS.quota_injected_files, 'injected_file_content_bytes': FLAGS.quota_injected_file_content_bytes, 'security_groups': FLAGS.quota_security_groups, 'security_group_rules': FLAGS.quota_security_group_rules, } # -1 in the quota flags means unlimited return defaults def get_class_quotas(context, quota_class, defaults=None): """Update defaults with the quota class values.""" if not defaults: defaults = _get_default_quotas() quota = db.quota_class_get_all_by_name(context, quota_class) for key in defaults.keys(): if key in quota: defaults[key] = quota[key] return defaults def get_project_quotas(context, project_id): defaults = _get_default_quotas() if context.quota_class: get_class_quotas(context, context.quota_class, defaults) quota = db.quota_get_all_by_project(context, project_id) for key in defaults.keys(): if key in quota: defaults[key] = quota[key] return defaults def _get_request_allotment(requested, 
used, quota): if quota == -1: return requested return quota - used def allowed_instances(context, requested_instances, instance_type): """Check quota and return min(requested_instances, allowed_instances).""" project_id = context.project_id context = context.elevated() requested_cores = requested_instances * instance_type['vcpus'] requested_ram = requested_instances * instance_type['memory_mb'] usage = db.instance_data_get_for_project(context, project_id) used_instances, used_cores, used_ram = usage quota = get_project_quotas(context, project_id) allowed_instances = _get_request_allotment(requested_instances, used_instances, quota['instances']) allowed_cores = _get_request_allotment(requested_cores, used_cores, quota['cores']) allowed_ram = _get_request_allotment(requested_ram, used_ram, quota['ram']) if instance_type['vcpus']: allowed_instances = min(allowed_instances, allowed_cores // instance_type['vcpus']) if instance_type['memory_mb']: allowed_instances = min(allowed_instances, allowed_ram // instance_type['memory_mb']) return min(requested_instances, allowed_instances) def allowed_volumes(context, requested_volumes, size): """Check quota and return min(requested_volumes, allowed_volumes).""" project_id = context.project_id context = context.elevated() size = int(size) requested_gigabytes = requested_volumes * size used_volumes, used_gigabytes = db.volume_data_get_for_project(context, project_id) quota = get_project_quotas(context, project_id) allowed_volumes = _get_request_allotment(requested_volumes, used_volumes, quota['volumes']) allowed_gigabytes = _get_request_allotment(requested_gigabytes, used_gigabytes, quota['gigabytes']) if size != 0: allowed_volumes = min(allowed_volumes, int(allowed_gigabytes // size)) return min(requested_volumes, allowed_volumes) def allowed_floating_ips(context, requested_floating_ips): """Check quota and return min(requested, allowed) floating ips.""" project_id = context.project_id context = context.elevated() used_floating_ips = db.floating_ip_count_by_project(context, project_id) quota = get_project_quotas(context, project_id) allowed_floating_ips = _get_request_allotment(requested_floating_ips, used_floating_ips, quota['floating_ips']) return min(requested_floating_ips, allowed_floating_ips) def allowed_security_groups(context, requested_security_groups): """Check quota and return min(requested, allowed) security groups.""" project_id = context.project_id context = context.elevated() used_sec_groups = db.security_group_count_by_project(context, project_id) quota = get_project_quotas(context, project_id) allowed_sec_groups = _get_request_allotment(requested_security_groups, used_sec_groups, quota['security_groups']) return min(requested_security_groups, allowed_sec_groups) def allowed_security_group_rules(context, security_group_id, requested_rules): """Check quota and return min(requested, allowed) sec group rules.""" project_id = context.project_id context = context.elevated() used_rules = db.security_group_rule_count_by_group(context, security_group_id) quota = get_project_quotas(context, project_id) allowed_rules = _get_request_allotment(requested_rules, used_rules, quota['security_group_rules']) return min(requested_rules, allowed_rules) def _calculate_simple_quota(context, resource, requested): """Check quota for resource; return min(requested, allowed).""" quota = get_project_quotas(context, context.project_id) allowed = _get_request_allotment(requested, 0, quota[resource]) return min(requested, allowed) def allowed_metadata_items(context, 
requested_metadata_items): """Return the number of metadata items allowed.""" return _calculate_simple_quota(context, 'metadata_items', requested_metadata_items) def allowed_injected_files(context, requested_injected_files): """Return the number of injected files allowed.""" return _calculate_simple_quota(context, 'injected_files', requested_injected_files) def allowed_injected_file_content_bytes(context, requested_bytes): """Return the number of bytes allowed per injected file content.""" resource = 'injected_file_content_bytes' return _calculate_simple_quota(context, resource, requested_bytes) def allowed_injected_file_path_bytes(context): """Return the number of bytes allowed in an injected file path.""" return FLAGS.quota_injected_file_path_bytes
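

# NOTE(editor): self-contained illustration, not called by nova -- it
# demonstrates the allotment arithmetic implemented above, where a quota
# of -1 means unlimited and anything else grants only the remaining
# headroom.
def _demo_allotment():
    # 10-instance quota with 7 in use: a request for 5 is capped at 3.
    assert min(5, _get_request_allotment(5, 7, 10)) == 3
    # an unlimited (-1) quota grants the full request.
    assert min(5, _get_request_allotment(5, 7, -1)) == 5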
./CrossVul/dataset_final_sorted/CWE-264/py/good_3632_4
crossvul-python_data_good_3771_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ /images endpoint for Glance v1 API """ import sys import traceback import eventlet from webob.exc import (HTTPError, HTTPNotFound, HTTPConflict, HTTPBadRequest, HTTPForbidden, HTTPRequestEntityTooLarge, HTTPServiceUnavailable) from glance.api import common from glance.api import policy import glance.api.v1 from glance import context from glance.api.v1 import controller from glance.api.v1 import filters from glance.common import exception from glance.common import utils from glance.common import wsgi from glance import notifier from glance.openstack.common import cfg import glance.openstack.common.log as logging from glance import registry from glance.store import (create_stores, get_from_backend, get_size_from_backend, safe_delete_from_backend, schedule_delayed_delete_from_backend, get_store_from_location, get_store_from_scheme) LOG = logging.getLogger(__name__) SUPPORTED_PARAMS = glance.api.v1.SUPPORTED_PARAMS SUPPORTED_FILTERS = glance.api.v1.SUPPORTED_FILTERS CONTAINER_FORMATS = ['ami', 'ari', 'aki', 'bare', 'ovf'] DISK_FORMATS = ['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso'] # Defined at module level due to _is_opt_registered # identity check (not equality). default_store_opt = cfg.StrOpt('default_store', default='file') CONF = cfg.CONF CONF.register_opt(default_store_opt) def validate_image_meta(req, values): name = values.get('name') disk_format = values.get('disk_format') container_format = values.get('container_format') if 'disk_format' in values: if not disk_format in DISK_FORMATS: msg = "Invalid disk format '%s' for image." % disk_format raise HTTPBadRequest(explanation=msg, request=req) if 'container_format' in values: if not container_format in CONTAINER_FORMATS: msg = "Invalid container format '%s' for image." % container_format raise HTTPBadRequest(explanation=msg, request=req) if name and len(name) > 255: msg = _('Image name too long: %d') % len(name) raise HTTPBadRequest(explanation=msg, request=req) amazon_formats = ('aki', 'ari', 'ami') if disk_format in amazon_formats or container_format in amazon_formats: if disk_format is None: values['disk_format'] = container_format elif container_format is None: values['container_format'] = disk_format elif container_format != disk_format: msg = ("Invalid mix of disk and container formats. " "When setting a disk or container format to " "one of 'aki', 'ari', or 'ami', the container " "and disk formats must match.") raise HTTPBadRequest(explanation=msg, request=req) return values class Controller(controller.BaseController): """ WSGI controller for images resource in Glance v1 API The images resource API is a RESTful web service for image data. 
The API is as follows:: GET /images -- Returns a set of brief metadata about images GET /images/detail -- Returns a set of detailed metadata about images HEAD /images/<ID> -- Return metadata about an image with id <ID> GET /images/<ID> -- Return image data for image with id <ID> POST /images -- Store image data and return metadata about the newly-stored image PUT /images/<ID> -- Update image metadata and/or upload image data for a previously-reserved image DELETE /images/<ID> -- Delete the image with id <ID> """ def __init__(self): create_stores() self.verify_scheme_or_exit(CONF.default_store) self.notifier = notifier.Notifier() registry.configure_registry_client() self.policy = policy.Enforcer() self.pool = eventlet.GreenPool(size=1024) def _enforce(self, req, action): """Authorize an action against our policies""" try: self.policy.enforce(req.context, action, {}) except exception.Forbidden: raise HTTPForbidden() def index(self, req): """ Returns the following information for all public, available images: * id -- The opaque image identifier * name -- The name of the image * disk_format -- The disk image format * container_format -- The "container" format of the image * checksum -- MD5 checksum of the image data * size -- Size of image data in bytes :param req: The WSGI/Webob Request object :retval The response body is a mapping of the following form:: {'images': [ {'id': <ID>, 'name': <NAME>, 'disk_format': <DISK_FORMAT>, 'container_format': <DISK_FORMAT>, 'checksum': <CHECKSUM> 'size': <SIZE>}, ... ]} """ self._enforce(req, 'get_images') params = self._get_query_params(req) try: images = registry.get_images_list(req.context, **params) except exception.Invalid, e: raise HTTPBadRequest(explanation="%s" % e) return dict(images=images) def detail(self, req): """ Returns detailed information for all public, available images :param req: The WSGI/Webob Request object :retval The response body is a mapping of the following form:: {'images': [ {'id': <ID>, 'name': <NAME>, 'size': <SIZE>, 'disk_format': <DISK_FORMAT>, 'container_format': <CONTAINER_FORMAT>, 'checksum': <CHECKSUM>, 'min_disk': <MIN_DISK>, 'min_ram': <MIN_RAM>, 'store': <STORE>, 'status': <STATUS>, 'created_at': <TIMESTAMP>, 'updated_at': <TIMESTAMP>, 'deleted_at': <TIMESTAMP>|<NONE>, 'properties': {'distro': 'Ubuntu 10.04 LTS', ...}}, ... ]} """ self._enforce(req, 'get_images') params = self._get_query_params(req) try: images = registry.get_images_detail(req.context, **params) # Strip out the Location attribute. Temporary fix for # LP Bug #755916. This information is still coming back # from the registry, since the API server still needs access # to it, however we do not return this potential security # information to the API end user... for image in images: del image['location'] except exception.Invalid, e: raise HTTPBadRequest(explanation="%s" % e) return dict(images=images) def _get_query_params(self, req): """ Extracts necessary query params from request. 
:param req: the WSGI Request object :retval dict of parameters that can be used by registry client """ params = {'filters': self._get_filters(req)} for PARAM in SUPPORTED_PARAMS: if PARAM in req.params: params[PARAM] = req.params.get(PARAM) return params def _get_filters(self, req): """ Return a dictionary of query param filters from the request :param req: the Request object coming from the wsgi layer :retval a dict of key/value filters """ query_filters = {} for param in req.params: if param in SUPPORTED_FILTERS or param.startswith('property-'): query_filters[param] = req.params.get(param) if not filters.validate(param, query_filters[param]): raise HTTPBadRequest('Bad value passed to filter %s ' 'got %s' % (param, query_filters[param])) return query_filters def meta(self, req, id): """ Returns metadata about an image in the HTTP headers of the response object :param req: The WSGI/Webob Request object :param id: The opaque image identifier :retval similar to 'show' method but without image_data :raises HTTPNotFound if image metadata is not available to user """ self._enforce(req, 'get_image') image_meta = self.get_image_meta_or_404(req, id) del image_meta['location'] return { 'image_meta': image_meta } @staticmethod def _validate_source(source, req): """ External sources (as specified via the location or copy-from headers) are supported only over non-local store types, i.e. S3, Swift, HTTP. Note the absence of file:// for security reasons, see LP bug #942118. If the above constraint is violated, we reject with 400 "Bad Request". """ if source: for scheme in ['s3', 'swift', 'http']: if source.lower().startswith(scheme): return source msg = _("External sourcing not supported for store %s") % source LOG.error(msg) raise HTTPBadRequest(explanation=msg, request=req, content_type="text/plain") @staticmethod def _copy_from(req): return req.headers.get('x-glance-api-copy-from') @staticmethod def _external_source(image_meta, req): source = image_meta.get('location', Controller._copy_from(req)) return Controller._validate_source(source, req) @staticmethod def _get_from_store(context, where): try: image_data, image_size = get_from_backend(context, where) except exception.NotFound, e: raise HTTPNotFound(explanation="%s" % e) image_size = int(image_size) if image_size else None return image_data, image_size def show(self, req, id): """ Returns an iterator that can be used to retrieve an image's data along with the image metadata. :param req: The WSGI/Webob Request object :param id: The opaque image identifier :raises HTTPNotFound if image is not available to user """ self._enforce(req, 'get_image') self._enforce(req, 'download_image') image_meta = self.get_active_image_meta_or_404(req, id) if image_meta.get('size') == 0: image_iterator = iter([]) else: image_iterator, size = self._get_from_store(req.context, image_meta['location']) image_iterator = utils.cooperative_iter(image_iterator) image_meta['size'] = size or image_meta['size'] del image_meta['location'] return { 'image_iterator': image_iterator, 'image_meta': image_meta, } def _reserve(self, req, image_meta): """ Adds the image metadata to the registry and assigns an image identifier if one is not supplied in the request headers. Sets the image's status to `queued`. 
:param req: The WSGI/Webob Request object :param id: The opaque image identifier :param image_meta: The image metadata :raises HTTPConflict if image already exists :raises HTTPBadRequest if image metadata is not valid """ location = self._external_source(image_meta, req) image_meta['status'] = ('active' if image_meta.get('size') == 0 else 'queued') if location: store = get_store_from_location(location) # check the store exists before we hit the registry, but we # don't actually care what it is at this point self.get_store_or_400(req, store) # retrieve the image size from remote store (if not provided) image_meta['size'] = self._get_size(req.context, image_meta, location) else: # Ensure that the size attribute is set to zero for directly # uploadable images (if not provided). The size will be set # to a non-zero value during upload image_meta['size'] = image_meta.get('size', 0) try: image_meta = registry.add_image_metadata(req.context, image_meta) return image_meta except exception.Duplicate: msg = (_("An image with identifier %s already exists") % image_meta['id']) LOG.error(msg) raise HTTPConflict(explanation=msg, request=req, content_type="text/plain") except exception.Invalid, e: msg = (_("Failed to reserve image. Got error: %(e)s") % locals()) for line in msg.split('\n'): LOG.error(line) raise HTTPBadRequest(explanation=msg, request=req, content_type="text/plain") except exception.Forbidden: msg = _("Forbidden to reserve image.") LOG.error(msg) raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") def _upload(self, req, image_meta): """ Uploads the payload of the request to a backend store in Glance. If the `x-image-meta-store` header is set, Glance will attempt to use that scheme; if not, Glance will use the scheme set by the flag `default_store` to find the backing store. :param req: The WSGI/Webob Request object :param image_meta: Mapping of metadata about image :raises HTTPConflict if image already exists :retval The location where the image was stored """ copy_from = self._copy_from(req) if copy_from: try: image_data, image_size = self._get_from_store(req.context, copy_from) except Exception as e: self._safe_kill(req, image_meta['id']) msg = _("Copy from external source failed: %s") % e LOG.error(msg) return image_meta['size'] = image_size or image_meta['size'] else: try: req.get_content_type('application/octet-stream') except exception.InvalidContentType: self._safe_kill(req, image_meta['id']) msg = _("Content-Type must be application/octet-stream") LOG.error(msg) raise HTTPBadRequest(explanation=msg) image_data = req.body_file scheme = req.headers.get('x-image-meta-store', CONF.default_store) store = self.get_store_or_400(req, scheme) image_id = image_meta['id'] LOG.debug(_("Setting image %s to status 'saving'"), image_id) registry.update_image_metadata(req.context, image_id, {'status': 'saving'}) LOG.debug(_("Uploading image data for image %(image_id)s " "to %(scheme)s store"), locals()) try: location, size, checksum = store.add( image_meta['id'], utils.CooperativeReader(image_data), image_meta['size']) # Verify any supplied checksum value matches checksum # returned from store when adding image supplied_checksum = image_meta.get('checksum') if supplied_checksum and supplied_checksum != checksum: msg = _("Supplied checksum (%(supplied_checksum)s) and " "checksum generated from uploaded image " "(%(checksum)s) did not match. 
Setting image " "status to 'killed'.") % locals() LOG.error(msg) self._safe_kill(req, image_id) raise HTTPBadRequest(explanation=msg, content_type="text/plain", request=req) # Update the database with the checksum returned # from the backend store LOG.debug(_("Updating image %(image_id)s data. " "Checksum set to %(checksum)s, size set " "to %(size)d"), locals()) update_data = {'checksum': checksum, 'size': size} image_meta = registry.update_image_metadata(req.context, image_id, update_data) self.notifier.info('image.upload', image_meta) return location except exception.Duplicate, e: msg = _("Attempt to upload duplicate image: %s") % e LOG.error(msg) self._safe_kill(req, image_id) raise HTTPConflict(explanation=msg, request=req) except exception.Forbidden, e: msg = _("Forbidden upload attempt: %s") % e LOG.error(msg) self._safe_kill(req, image_id) raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") except exception.StorageFull, e: msg = _("Image storage media is full: %s") % e LOG.error(msg) self._safe_kill(req, image_id) self.notifier.error('image.upload', msg) raise HTTPRequestEntityTooLarge(explanation=msg, request=req, content_type='text/plain') except exception.StorageWriteDenied, e: msg = _("Insufficient permissions on image storage media: %s") % e LOG.error(msg) self._safe_kill(req, image_id) self.notifier.error('image.upload', msg) raise HTTPServiceUnavailable(explanation=msg, request=req, content_type='text/plain') except exception.ImageSizeLimitExceeded, e: msg = _("Denying attempt to upload image larger than %d bytes.") self._safe_kill(req, image_id) raise HTTPBadRequest(explanation=msg % CONF.image_size_cap, request=req, content_type='text/plain') except HTTPError, e: self._safe_kill(req, image_id) #NOTE(bcwaldon): Ideally, we would just call 'raise' here, # but something in the above function calls is affecting the # exception context and we must explicitly re-raise the # caught exception. raise e except Exception, e: tb_info = traceback.format_exc() LOG.error(tb_info) self._safe_kill(req, image_id) msg = _("Error uploading image: (%(class_name)s): " "%(exc)s") % ({'class_name': e.__class__.__name__, 'exc': str(e)}) raise HTTPBadRequest(explanation=msg, request=req) def _activate(self, req, image_id, location): """ Sets the image status to `active` and the image's location attribute. :param req: The WSGI/Webob Request object :param image_id: Opaque image identifier :param location: Location of where Glance stored this image """ image_meta = {} image_meta['location'] = location image_meta['status'] = 'active' try: image_meta_data = registry.update_image_metadata(req.context, image_id, image_meta) self.notifier.info("image.update", image_meta_data) return image_meta_data except exception.Invalid, e: msg = (_("Failed to activate image. Got error: %(e)s") % locals()) for line in msg.split('\n'): LOG.error(line) raise HTTPBadRequest(explanation=msg, request=req, content_type="text/plain") def _kill(self, req, image_id): """ Marks the image status to `killed`. :param req: The WSGI/Webob Request object :param image_id: Opaque image identifier """ registry.update_image_metadata(req.context, image_id, {'status': 'killed'}) def _safe_kill(self, req, image_id): """ Mark image killed without raising exceptions if it fails. Since _kill is meant to be called from exceptions handlers, it should not raise itself, rather it should just log its error. 
:param req: The WSGI/Webob Request object :param image_id: Opaque image identifier """ try: self._kill(req, image_id) except Exception, e: LOG.error(_("Unable to kill image %(id)s: " "%(exc)s") % ({'id': image_id, 'exc': repr(e)})) def _upload_and_activate(self, req, image_meta): """ Safely uploads the image data in the request payload and activates the image in the registry after a successful upload. :param req: The WSGI/Webob Request object :param image_meta: Mapping of metadata about image :retval Mapping of updated image data """ image_id = image_meta['id'] # This is necessary because of a bug in Webob 1.0.2 - 1.0.7 # See: https://bitbucket.org/ianb/webob/ # issue/12/fix-for-issue-6-broke-chunked-transfer req.is_body_readable = True location = self._upload(req, image_meta) return self._activate(req, image_id, location) if location else None def _get_size(self, context, image_meta, location): # retrieve the image size from remote store (if not provided) return image_meta.get('size', 0) or get_size_from_backend(context, location) def _handle_source(self, req, image_id, image_meta, image_data): if image_data: image_meta = self._validate_image_for_activation(req, image_id, image_meta) image_meta = self._upload_and_activate(req, image_meta) elif self._copy_from(req): msg = _('Triggering asynchronous copy from external source') LOG.info(msg) self.pool.spawn_n(self._upload_and_activate, req, image_meta) else: location = image_meta.get('location') if location: self._validate_image_for_activation(req, image_id, image_meta) image_meta = self._activate(req, image_id, location) return image_meta def _validate_image_for_activation(self, req, id, values): """Ensures that all required image metadata values are valid.""" image = self.get_image_meta_or_404(req, id) if not 'disk_format' in values: values['disk_format'] = image['disk_format'] if not 'container_format' in values: values['container_format'] = image['container_format'] if not 'name' in values: values['name'] = image['name'] values = validate_image_meta(req, values) return values @utils.mutating def create(self, req, image_meta, image_data): """ Adds a new image to Glance. Four scenarios exist when creating an image: 1. If the image data is available directly for upload, create can be passed the image data as the request body and the metadata as the request headers. The image will initially be 'queued', during upload it will be in the 'saving' status, and then 'killed' or 'active' depending on whether the upload completed successfully. 2. If the image data exists somewhere else, you can upload indirectly from the external source using the x-glance-api-copy-from header. Once the image is uploaded, the external store is not subsequently consulted, i.e. the image content is served out from the configured glance image store. State transitions are as for option #1. 3. If the image data exists somewhere else, you can reference the source using the x-image-meta-location header. The image content will be served out from the external store, i.e. is never uploaded to the configured glance image store. 4. If the image data is not available yet, but you'd like reserve a spot for it, you can omit the data and a record will be created in the 'queued' state. This exists primarily to maintain backwards compatibility with OpenStack/Rackspace API semantics. The request body *must* be encoded as application/octet-stream, otherwise an HTTPBadRequest is returned. 
Upon a successful save of the image data and metadata, a response containing metadata about the image is returned, including its opaque identifier. :param req: The WSGI/Webob Request object :param image_meta: Mapping of metadata about image :param image_data: Actual image data that is to be stored :raises HTTPBadRequest if x-image-meta-location is missing and the request body is not application/octet-stream image data. """ self._enforce(req, 'add_image') is_public = image_meta.get('is_public') if is_public: self._enforce(req, 'publicize_image') image_meta = self._reserve(req, image_meta) id = image_meta['id'] image_meta = self._handle_source(req, id, image_meta, image_data) location_uri = image_meta.get('location') if location_uri: self.update_store_acls(req, id, location_uri, public=is_public) # Prevent client from learning the location, as it # could contain security credentials image_meta.pop('location', None) return {'image_meta': image_meta} @utils.mutating def update(self, req, id, image_meta, image_data): """ Updates an existing image with the registry. :param request: The WSGI/Webob Request object :param id: The opaque image identifier :retval Returns the updated image information as a mapping """ self._enforce(req, 'modify_image') is_public = image_meta.get('is_public') if is_public: self._enforce(req, 'publicize_image') orig_image_meta = self.get_image_meta_or_404(req, id) orig_status = orig_image_meta['status'] # Do not allow any updates on a deleted image. # Fix for LP Bug #1060930 if orig_status == 'deleted': msg = _("Forbidden to update deleted image.") raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") # The default behaviour for a PUT /images/<IMAGE_ID> is to # override any properties that were previously set. This, however, # leads to a number of issues for the common use case where a caller # registers an image with some properties and then almost immediately # uploads an image file along with some more properties. Here, we # check for a special header value to be false in order to force # properties NOT to be purged. However we also disable purging of # properties if an image file is being uploaded... 
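        # NOTE(editor): illustrative request (hypothetical values) for the
        # behaviour described above:
        #
        #     PUT /v1/images/<IMAGE_ID>
        #     x-glance-registry-purge-props: false
        #     x-image-meta-property-distro: Ubuntu 10.04 LTS
        #
        # leaves previously-set properties in place and only adds or
        # overrides the supplied ones; without the header (default: true)
        # the whole property set is replaced, and purging is also skipped
        # whenever image data is uploaded in the same call.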
purge_props = req.headers.get('x-glance-registry-purge-props', True) purge_props = (utils.bool_from_string(purge_props) and image_data is None) if image_data is not None and orig_status != 'queued': raise HTTPConflict(_("Cannot upload to an unqueued image")) # Only allow the Location|Copy-From fields to be modified if the # image is in queued status, which indicates that the user called # POST /images but originally supply neither a Location|Copy-From # field NOR image data location = self._external_source(image_meta, req) reactivating = orig_status != 'queued' and location activating = orig_status == 'queued' and (location or image_data) # Make image public in the backend store (if implemented) orig_or_updated_loc = location or orig_image_meta.get('location', None) if orig_or_updated_loc: self.update_store_acls(req, id, orig_or_updated_loc, public=is_public) if reactivating: msg = _("Attempted to update Location field for an image " "not in queued status.") raise HTTPBadRequest(explanation=msg, request=req, content_type="text/plain") try: if location: image_meta['size'] = self._get_size(req.context, image_meta, location) image_meta = registry.update_image_metadata(req.context, id, image_meta, purge_props) if activating: image_meta = self._handle_source(req, id, image_meta, image_data) except exception.Invalid, e: msg = (_("Failed to update image metadata. Got error: %(e)s") % locals()) for line in msg.split('\n'): LOG.error(line) raise HTTPBadRequest(explanation=msg, request=req, content_type="text/plain") except exception.NotFound, e: msg = ("Failed to find image to update: %(e)s" % locals()) for line in msg.split('\n'): LOG.info(line) raise HTTPNotFound(explanation=msg, request=req, content_type="text/plain") except exception.Forbidden, e: msg = ("Forbidden to update image: %(e)s" % locals()) for line in msg.split('\n'): LOG.info(line) raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") else: self.notifier.info('image.update', image_meta) # Prevent client from learning the location, as it # could contain security credentials image_meta.pop('location', None) return {'image_meta': image_meta} @utils.mutating def delete(self, req, id): """ Deletes the image and all its chunks from the Glance :param req: The WSGI/Webob Request object :param id: The opaque image identifier :raises HttpBadRequest if image registry is invalid :raises HttpNotFound if image or any chunk is not available :raises HttpUnauthorized if image or any chunk is not deleteable by the requesting user """ self._enforce(req, 'delete_image') image = self.get_image_meta_or_404(req, id) if image['protected']: msg = _("Image is protected") LOG.debug(msg) raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") if image['status'] == 'deleted': msg = _("Forbidden to delete a deleted image.") LOG.debug(msg) raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") if image['location'] and CONF.delayed_delete: status = 'pending_delete' else: status = 'deleted' try: # Delete the image from the registry first, since we rely on it # for authorization checks. # See https://bugs.launchpad.net/glance/+bug/1065187 registry.update_image_metadata(req.context, id, {'status': status}) registry.delete_image_metadata(req.context, id) # The image's location field may be None in the case # of a saving or queued image, therefore don't ask a backend # to delete the image if the backend doesn't yet store it. 
            # See https://bugs.launchpad.net/glance/+bug/747799
            if image['location']:
                if CONF.delayed_delete:
                    schedule_delayed_delete_from_backend(image['location'],
                                                         id)
                else:
                    safe_delete_from_backend(image['location'],
                                             req.context, id)
        except exception.NotFound, e:
            msg = ("Failed to find image to delete: %(e)s" % locals())
            for line in msg.split('\n'):
                LOG.info(line)
            raise HTTPNotFound(explanation=msg,
                               request=req,
                               content_type="text/plain")
        except exception.Forbidden, e:
            msg = ("Forbidden to delete image: %(e)s" % locals())
            for line in msg.split('\n'):
                LOG.info(line)
            raise HTTPForbidden(explanation=msg,
                                request=req,
                                content_type="text/plain")
        else:
            self.notifier.info('image.delete', image)

    def get_store_or_400(self, request, scheme):
        """
        Grabs the storage backend for the supplied store name
        or raises an HTTPBadRequest (400) response

        :param request: The WSGI/Webob Request object
        :param scheme: The backend store scheme

        :raises HTTPNotFound if store does not exist
        """
        try:
            return get_store_from_scheme(request.context, scheme)
        except exception.UnknownScheme:
            msg = _("Store for scheme %s not found")
            LOG.error(msg % scheme)
            raise HTTPBadRequest(explanation=msg,
                                 request=request,
                                 content_type='text/plain')

    def verify_scheme_or_exit(self, scheme):
        """
        Verifies availability of the storage backend for the
        given scheme or exits

        :param scheme: The backend store scheme
        """
        try:
            get_store_from_scheme(context.RequestContext(), scheme)
        except exception.UnknownScheme:
            msg = _("Store for scheme %s not found")
            LOG.error(msg % scheme)
            # message on stderr will only be visible if started directly via
            # bin/glance-api, as opposed to being daemonized by glance-control
            sys.stderr.write(msg % scheme)
            sys.exit(255)


class ImageDeserializer(wsgi.JSONRequestDeserializer):
    """Handles deserialization of specific controller method requests."""

    def _deserialize(self, request):
        result = {}
        try:
            result['image_meta'] = utils.get_image_meta_from_headers(request)
        except exception.Invalid:
            image_size_str = request.headers['x-image-meta-size']
            msg = _("Incoming image size of %s was not convertible to "
                    "an integer.") % image_size_str
            raise HTTPBadRequest(explanation=msg, request=request)

        image_meta = result['image_meta']
        image_meta = validate_image_meta(request, image_meta)
        if request.content_length:
            image_size = request.content_length
        elif 'size' in image_meta:
            image_size = image_meta['size']
        else:
            image_size = None

        data = request.body_file if self.has_body(request) else None

        if image_size is None and data is not None:
            data = utils.LimitingReader(data, CONF.image_size_cap)

            #NOTE(bcwaldon): this is a hack to make sure the downstream code
            # gets the correct image data
            request.body_file = data

        elif image_size > CONF.image_size_cap:
            max_image_size = CONF.image_size_cap
            msg = _("Denying attempt to upload image larger than %d bytes.")
            LOG.warn(msg % max_image_size)
            raise HTTPBadRequest(explanation=msg % max_image_size,
                                 request=request)

        result['image_data'] = data
        return result

    def create(self, request):
        return self._deserialize(request)

    def update(self, request):
        return self._deserialize(request)


class ImageSerializer(wsgi.JSONResponseSerializer):
    """Handles serialization of specific controller method responses."""

    def __init__(self):
        self.notifier = notifier.Notifier()

    def _inject_location_header(self, response, image_meta):
        location = self._get_image_location(image_meta)
        response.headers['Location'] = location.encode('utf-8')

    def _inject_checksum_header(self, response, image_meta):
        if image_meta['checksum'] is not None:
            response.headers['ETag'] = image_meta['checksum'].encode('utf-8')

    def _inject_image_meta_headers(self, response, image_meta):
        """
        Given a response and mapping of image metadata, injects
        the Response with a set of HTTP headers for the image
        metadata. Each main image metadata field is injected
        as a HTTP header with key 'x-image-meta-<FIELD>' except
        for the properties field, which is further broken out
        into a set of 'x-image-meta-property-<KEY>' headers

        :param response: The Webob Response object
        :param image_meta: Mapping of image metadata
        """
        headers = utils.image_meta_to_http_headers(image_meta)

        for k, v in headers.items():
            response.headers[k.encode('utf-8')] = v.encode('utf-8')

    def _get_image_location(self, image_meta):
        """Build a relative url to reach the image defined by image_meta."""
        return "/v1/images/%s" % image_meta['id']

    def meta(self, response, result):
        image_meta = result['image_meta']
        self._inject_image_meta_headers(response, image_meta)
        self._inject_location_header(response, image_meta)
        self._inject_checksum_header(response, image_meta)
        return response

    def show(self, response, result):
        image_meta = result['image_meta']
        image_id = image_meta['id']
        image_iter = result['image_iterator']
        # image_meta['size'] should be an int, but could possibly be a str
        expected_size = int(image_meta['size'])
        response.app_iter = common.size_checked_iter(
            response, image_meta, expected_size, image_iter, self.notifier)
        # Using app_iter blanks content-length, so we set it here...
        response.headers['Content-Length'] = str(image_meta['size'])
        response.headers['Content-Type'] = 'application/octet-stream'

        self._inject_image_meta_headers(response, image_meta)
        self._inject_location_header(response, image_meta)
        self._inject_checksum_header(response, image_meta)

        return response

    def update(self, response, result):
        image_meta = result['image_meta']
        response.body = self.to_json(dict(image=image_meta))
        response.headers['Content-Type'] = 'application/json'
        self._inject_location_header(response, image_meta)
        self._inject_checksum_header(response, image_meta)
        return response

    def create(self, response, result):
        image_meta = result['image_meta']
        response.status = 201
        response.headers['Content-Type'] = 'application/json'
        response.body = self.to_json(dict(image=image_meta))
        self._inject_location_header(response, image_meta)
        self._inject_checksum_header(response, image_meta)
        return response


def create_resource():
    """Images resource factory method"""
    deserializer = ImageDeserializer()
    serializer = ImageSerializer()
    return wsgi.Resource(Controller(), deserializer, serializer)
./CrossVul/dataset_final_sorted/CWE-264/py/good_3771_0
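The ImageDeserializer in the record above caps uploads of unknown length by wrapping request.body_file in utils.LimitingReader against CONF.image_size_cap. A minimal standalone sketch of that wrapper pattern follows; the class name and exception are hypothetical stand-ins, not the glance implementation.

import io


class CappedReader(object):
    """Wrap a file-like object and refuse to read past a byte limit."""

    def __init__(self, fileobj, limit):
        self.fileobj = fileobj
        self.limit = limit
        self.bytes_read = 0

    def read(self, size=-1):
        data = self.fileobj.read(size)
        self.bytes_read += len(data)
        if self.bytes_read > self.limit:
            # reject before the caller can buffer an oversized body
            raise IOError("upload exceeds %d byte cap" % self.limit)
        return data


# Example: a 10-byte body against a 4-byte cap raises during the read.
reader = CappedReader(io.BytesIO(b"0123456789"), limit=4)
try:
    reader.read()
except IOError as exc:
    print(exc)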
crossvul-python_data_bad_3634_5
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Quotas for instances, volumes, and floating ips."""

from nova import db
from nova.openstack.common import cfg
from nova import flags


quota_opts = [
    cfg.IntOpt('quota_instances',
               default=10,
               help='number of instances allowed per project'),
    cfg.IntOpt('quota_cores',
               default=20,
               help='number of instance cores allowed per project'),
    cfg.IntOpt('quota_ram',
               default=50 * 1024,
               help='megabytes of instance ram allowed per project'),
    cfg.IntOpt('quota_volumes',
               default=10,
               help='number of volumes allowed per project'),
    cfg.IntOpt('quota_gigabytes',
               default=1000,
               help='number of volume gigabytes allowed per project'),
    cfg.IntOpt('quota_floating_ips',
               default=10,
               help='number of floating ips allowed per project'),
    cfg.IntOpt('quota_metadata_items',
               default=128,
               help='number of metadata items allowed per instance'),
    cfg.IntOpt('quota_max_injected_files',
               default=5,
               help='number of injected files allowed'),
    cfg.IntOpt('quota_max_injected_file_content_bytes',
               default=10 * 1024,
               help='number of bytes allowed per injected file'),
    cfg.IntOpt('quota_max_injected_file_path_bytes',
               default=255,
               help='number of bytes allowed per injected file path'),
]

FLAGS = flags.FLAGS
FLAGS.register_opts(quota_opts)


def _get_default_quotas():
    defaults = {
        'instances': FLAGS.quota_instances,
        'cores': FLAGS.quota_cores,
        'ram': FLAGS.quota_ram,
        'volumes': FLAGS.quota_volumes,
        'gigabytes': FLAGS.quota_gigabytes,
        'floating_ips': FLAGS.quota_floating_ips,
        'metadata_items': FLAGS.quota_metadata_items,
        'injected_files': FLAGS.quota_max_injected_files,
        'injected_file_content_bytes':
            FLAGS.quota_max_injected_file_content_bytes,
    }
    # -1 in the quota flags means unlimited
    for key in defaults.keys():
        if defaults[key] == -1:
            defaults[key] = None
    return defaults


def get_project_quotas(context, project_id):
    rval = _get_default_quotas()
    quota = db.quota_get_all_by_project(context, project_id)
    for key in rval.keys():
        if key in quota:
            rval[key] = quota[key]
    return rval


def _get_request_allotment(requested, used, quota):
    if quota is None:
        return requested
    return quota - used


def allowed_instances(context, requested_instances, instance_type):
    """Check quota and return min(requested_instances, allowed_instances)."""
    project_id = context.project_id
    context = context.elevated()
    requested_cores = requested_instances * instance_type['vcpus']
    requested_ram = requested_instances * instance_type['memory_mb']
    usage = db.instance_data_get_for_project(context, project_id)
    used_instances, used_cores, used_ram = usage
    quota = get_project_quotas(context, project_id)
    allowed_instances = _get_request_allotment(requested_instances,
                                               used_instances,
                                               quota['instances'])
    allowed_cores = _get_request_allotment(requested_cores, used_cores,
                                           quota['cores'])
    allowed_ram = _get_request_allotment(requested_ram, used_ram,
                                         quota['ram'])
    if instance_type['vcpus']:
        allowed_instances = min(allowed_instances,
                                allowed_cores // instance_type['vcpus'])
    if instance_type['memory_mb']:
        allowed_instances = min(allowed_instances,
                                allowed_ram // instance_type['memory_mb'])
    return min(requested_instances, allowed_instances)


def allowed_volumes(context, requested_volumes, size):
    """Check quota and return min(requested_volumes, allowed_volumes)."""
    project_id = context.project_id
    context = context.elevated()
    size = int(size)
    requested_gigabytes = requested_volumes * size
    used_volumes, used_gigabytes = db.volume_data_get_for_project(context,
                                                                  project_id)
    quota = get_project_quotas(context, project_id)
    allowed_volumes = _get_request_allotment(requested_volumes, used_volumes,
                                             quota['volumes'])
    allowed_gigabytes = _get_request_allotment(requested_gigabytes,
                                               used_gigabytes,
                                               quota['gigabytes'])
    if size != 0:
        allowed_volumes = min(allowed_volumes,
                              int(allowed_gigabytes // size))
    return min(requested_volumes, allowed_volumes)


def allowed_floating_ips(context, requested_floating_ips):
    """Check quota and return min(requested, allowed) floating ips."""
    project_id = context.project_id
    context = context.elevated()
    used_floating_ips = db.floating_ip_count_by_project(context, project_id)
    quota = get_project_quotas(context, project_id)
    allowed_floating_ips = _get_request_allotment(requested_floating_ips,
                                                  used_floating_ips,
                                                  quota['floating_ips'])
    return min(requested_floating_ips, allowed_floating_ips)


def _calculate_simple_quota(context, resource, requested):
    """Check quota for resource; return min(requested, allowed)."""
    quota = get_project_quotas(context, context.project_id)
    allowed = _get_request_allotment(requested, 0, quota[resource])
    return min(requested, allowed)


def allowed_metadata_items(context, requested_metadata_items):
    """Return the number of metadata items allowed."""
    return _calculate_simple_quota(context, 'metadata_items',
                                   requested_metadata_items)


def allowed_injected_files(context, requested_injected_files):
    """Return the number of injected files allowed."""
    return _calculate_simple_quota(context, 'injected_files',
                                   requested_injected_files)


def allowed_injected_file_content_bytes(context, requested_bytes):
    """Return the number of bytes allowed per injected file content."""
    resource = 'injected_file_content_bytes'
    return _calculate_simple_quota(context, resource, requested_bytes)


def allowed_injected_file_path_bytes(context):
    """Return the number of bytes allowed in an injected file path."""
    return FLAGS.quota_max_injected_file_path_bytes
./CrossVul/dataset_final_sorted/CWE-264/py/bad_3634_5
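A quick worked example of the allotment arithmetic in the record above. This is a standalone copy of the _get_request_allotment logic (not an import of the nova module): with a quota of 10 instances, 7 already in use, and 5 requested, only 3 are granted.

def get_request_allotment(requested, used, quota):
    # None means unlimited (a -1 quota flag is mapped to None above)
    if quota is None:
        return requested
    return quota - used


requested = 5
allowed = get_request_allotment(requested, used=7, quota=10)
print(min(requested, allowed))  # -> 3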
crossvul-python_data_good_3694_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Main entry point into the Identity service."""

import uuid
import urllib
import urlparse

from keystone import config
from keystone import exception
from keystone import policy
from keystone import token
from keystone.common import logging
from keystone.common import manager
from keystone.common import wsgi


CONF = config.CONF
LOG = logging.getLogger(__name__)


class Manager(manager.Manager):
    """Default pivot point for the Identity backend.

    See :mod:`keystone.common.manager.Manager` for more details on how this
    dynamically calls the backend.

    """

    def __init__(self):
        super(Manager, self).__init__(CONF.identity.driver)


class Driver(object):
    """Interface description for an Identity driver."""

    def authenticate(self, user_id=None, tenant_id=None, password=None):
        """Authenticate a given user, tenant and password.

        Returns: (user, tenant, metadata).

        """
        raise exception.NotImplemented()

    def get_tenant(self, tenant_id):
        """Get a tenant by id.

        Returns: tenant_ref or None.

        """
        raise exception.NotImplemented()

    def get_tenant_by_name(self, tenant_name):
        """Get a tenant by name.

        Returns: tenant_ref or None.

        """
        raise exception.NotImplemented()

    def get_user(self, user_id):
        """Get a user by id.

        Returns: user_ref or None.

        """
        raise exception.NotImplemented()

    def get_user_by_name(self, user_name):
        """Get a user by name.

        Returns: user_ref or None.

        """
        raise exception.NotImplemented()

    def get_role(self, role_id):
        """Get a role by id.

        Returns: role_ref or None.

        """
        raise exception.NotImplemented()

    def list_users(self):
        """List all users in the system.

        NOTE(termie): I'd prefer if this listed only the users for a given
                      tenant.

        Returns: a list of user_refs or an empty list.

        """
        raise exception.NotImplemented()

    def list_roles(self):
        """List all roles in the system.

        Returns: a list of role_refs or an empty list.

        """
        raise exception.NotImplemented()

    # NOTE(termie): seven calls below should probably be exposed by the api
    #               more clearly when the api redesign happens
    def add_user_to_tenant(self, tenant_id, user_id):
        raise exception.NotImplemented()

    def remove_user_from_tenant(self, tenant_id, user_id):
        raise exception.NotImplemented()

    def get_all_tenants(self):
        raise exception.NotImplemented()

    def get_tenants_for_user(self, user_id):
        """Get the tenants associated with a given user.

        Returns: a list of tenant ids.

        """
        raise exception.NotImplemented()

    def get_roles_for_user_and_tenant(self, user_id, tenant_id):
        """Get the roles associated with a user within given tenant.

        Returns: a list of role ids.

        """
        raise exception.NotImplemented()

    def add_role_to_user_and_tenant(self, user_id, tenant_id, role_id):
        """Add a role to a user within given tenant."""
        raise exception.NotImplemented()

    def remove_role_from_user_and_tenant(self, user_id, tenant_id, role_id):
        """Remove a role from a user within given tenant."""
        raise exception.NotImplemented()

    # user crud
    def create_user(self, user_id, user):
        raise exception.NotImplemented()

    def update_user(self, user_id, user):
        raise exception.NotImplemented()

    def delete_user(self, user_id):
        raise exception.NotImplemented()

    # tenant crud
    def create_tenant(self, tenant_id, tenant):
        raise exception.NotImplemented()

    def update_tenant(self, tenant_id, tenant):
        raise exception.NotImplemented()

    def delete_tenant(self, tenant_id, tenant):
        raise exception.NotImplemented()

    # metadata crud
    def get_metadata(self, user_id, tenant_id):
        raise exception.NotImplemented()

    def create_metadata(self, user_id, tenant_id, metadata):
        raise exception.NotImplemented()

    def update_metadata(self, user_id, tenant_id, metadata):
        raise exception.NotImplemented()

    def delete_metadata(self, user_id, tenant_id, metadata):
        raise exception.NotImplemented()

    # role crud
    def create_role(self, role_id, role):
        raise exception.NotImplemented()

    def update_role(self, role_id, role):
        raise exception.NotImplemented()

    def delete_role(self, role_id):
        raise exception.NotImplemented()


class PublicRouter(wsgi.ComposableRouter):
    def add_routes(self, mapper):
        tenant_controller = TenantController()
        mapper.connect('/tenants',
                       controller=tenant_controller,
                       action='get_tenants_for_token',
                       conditions=dict(methods=['GET']))


class AdminRouter(wsgi.ComposableRouter):
    def add_routes(self, mapper):
        # Tenant Operations
        tenant_controller = TenantController()
        mapper.connect('/tenants',
                       controller=tenant_controller,
                       action='get_all_tenants',
                       conditions=dict(method=['GET']))
        mapper.connect('/tenants/{tenant_id}',
                       controller=tenant_controller,
                       action='get_tenant',
                       conditions=dict(method=['GET']))

        # User Operations
        user_controller = UserController()
        mapper.connect('/users/{user_id}',
                       controller=user_controller,
                       action='get_user',
                       conditions=dict(method=['GET']))

        # Role Operations
        roles_controller = RoleController()
        mapper.connect('/tenants/{tenant_id}/users/{user_id}/roles',
                       controller=roles_controller,
                       action='get_user_roles',
                       conditions=dict(method=['GET']))
        mapper.connect('/users/{user_id}/roles',
                       controller=user_controller,
                       action='get_user_roles',
                       conditions=dict(method=['GET']))


class TenantController(wsgi.Application):
    def __init__(self):
        self.identity_api = Manager()
        self.policy_api = policy.Manager()
        self.token_api = token.Manager()
        super(TenantController, self).__init__()

    def get_all_tenants(self, context, **kw):
        """Gets a list of all tenants for an admin user."""
        self.assert_admin(context)
        tenant_refs = self.identity_api.get_tenants(context)
        params = {
            'limit': context['query_string'].get('limit'),
            'marker': context['query_string'].get('marker'),
        }
        return self._format_tenant_list(tenant_refs, **params)

    def get_tenants_for_token(self, context, **kw):
        """Get valid tenants for token based on token used to authenticate.

        Pulls the token from the context, validates it and gets the valid
        tenants for the user in the token.

        Doesn't care about token scopedness.

        """
        try:
            token_ref = self.token_api.get_token(context=context,
                                                 token_id=context['token_id'])
        except exception.NotFound:
            raise exception.Unauthorized()

        user_ref = token_ref['user']
        tenant_ids = self.identity_api.get_tenants_for_user(
            context, user_ref['id'])
        tenant_refs = []
        for tenant_id in tenant_ids:
            tenant_refs.append(self.identity_api.get_tenant(
                context=context,
                tenant_id=tenant_id))
        params = {
            'limit': context['query_string'].get('limit'),
            'marker': context['query_string'].get('marker'),
        }
        return self._format_tenant_list(tenant_refs, **params)

    def get_tenant(self, context, tenant_id):
        # TODO(termie): this stuff should probably be moved to middleware
        self.assert_admin(context)
        tenant = self.identity_api.get_tenant(context, tenant_id)
        if tenant is None:
            raise exception.TenantNotFound(tenant_id=tenant_id)
        return {'tenant': tenant}

    # CRUD Extension
    def create_tenant(self, context, tenant):
        tenant_ref = self._normalize_dict(tenant)
        self.assert_admin(context)
        tenant_id = (tenant_ref.get('id')
                     and tenant_ref.get('id')
                     or uuid.uuid4().hex)
        tenant_ref['id'] = tenant_id
        tenant = self.identity_api.create_tenant(
            context, tenant_id, tenant_ref)
        return {'tenant': tenant}

    def update_tenant(self, context, tenant_id, tenant):
        self.assert_admin(context)
        if self.identity_api.get_tenant(context, tenant_id) is None:
            raise exception.TenantNotFound(tenant_id=tenant_id)
        tenant_ref = self.identity_api.update_tenant(
            context, tenant_id, tenant)
        return {'tenant': tenant_ref}

    def delete_tenant(self, context, tenant_id, **kw):
        self.assert_admin(context)
        if self.identity_api.get_tenant(context, tenant_id) is None:
            raise exception.TenantNotFound(tenant_id=tenant_id)
        self.identity_api.delete_tenant(context, tenant_id)

    def get_tenant_users(self, context, tenant_id, **kw):
        self.assert_admin(context)
        if self.identity_api.get_tenant(context, tenant_id) is None:
            raise exception.TenantNotFound(tenant_id=tenant_id)
        user_refs = self.identity_api.get_tenant_users(context, tenant_id)
        return {'users': user_refs}

    def _format_tenant_list(self, tenant_refs, **kwargs):
        marker = kwargs.get('marker')
        page_idx = 0
        if marker is not None:
            for (marker_idx, tenant) in enumerate(tenant_refs):
                if tenant['id'] == marker:
                    # we start pagination after the marker
                    page_idx = marker_idx + 1
                    break
            else:
                msg = 'Marker could not be found'
                raise exception.ValidationError(message=msg)

        limit = kwargs.get('limit')
        if limit is not None:
            try:
                limit = int(limit)
                if limit < 0:
                    raise AssertionError()
            except (ValueError, AssertionError):
                msg = 'Invalid limit value'
                raise exception.ValidationError(message=msg)

        tenant_refs = tenant_refs[page_idx:limit]

        for x in tenant_refs:
            if 'enabled' not in x:
                x['enabled'] = True

        o = {'tenants': tenant_refs,
             'tenants_links': []}
        return o


class UserController(wsgi.Application):
    def __init__(self):
        self.identity_api = Manager()
        self.policy_api = policy.Manager()
        self.token_api = token.Manager()
        super(UserController, self).__init__()

    def get_user(self, context, user_id):
        self.assert_admin(context)
        user_ref = self.identity_api.get_user(context, user_id)
        if not user_ref:
            raise exception.UserNotFound(user_id=user_id)
        return {'user': user_ref}

    def get_users(self, context):
        # NOTE(termie): i can't imagine that this really wants all the data
        #               about every single user in the system...
        self.assert_admin(context)
        user_refs = self.identity_api.list_users(context)
        return {'users': user_refs}

    # CRUD extension
    def create_user(self, context, user):
        user = self._normalize_dict(user)
        self.assert_admin(context)
        tenant_id = user.get('tenantId', None)
        if (tenant_id is not None
                and self.identity_api.get_tenant(context, tenant_id) is None):
            raise exception.TenantNotFound(tenant_id=tenant_id)
        user_id = uuid.uuid4().hex
        user_ref = user.copy()
        user_ref['id'] = user_id
        new_user_ref = self.identity_api.create_user(
            context, user_id, user_ref)
        if tenant_id:
            self.identity_api.add_user_to_tenant(context, tenant_id, user_id)
        return {'user': new_user_ref}

    def update_user(self, context, user_id, user):
        # NOTE(termie): this is really more of a patch than a put
        self.assert_admin(context)
        if self.identity_api.get_user(context, user_id) is None:
            raise exception.UserNotFound(user_id=user_id)

        user_ref = self.identity_api.update_user(context, user_id, user)

        # If the password was changed or the user was disabled we clear tokens
        if user.get('password') or user.get('enabled', True) == False:
            try:
                for token_id in self.token_api.list_tokens(context, user_id):
                    self.token_api.delete_token(context, token_id)
            except exception.NotImplemented:
                # The users status has been changed but tokens remain valid for
                # backends that can't list tokens for users
                LOG.warning('User %s status has changed, but existing tokens '
                            'remain valid' % user_id)
        return {'user': user_ref}

    def delete_user(self, context, user_id):
        self.assert_admin(context)
        if self.identity_api.get_user(context, user_id) is None:
            raise exception.UserNotFound(user_id=user_id)
        self.identity_api.delete_user(context, user_id)

    def set_user_enabled(self, context, user_id, user):
        return self.update_user(context, user_id, user)

    def set_user_password(self, context, user_id, user):
        return self.update_user(context, user_id, user)

    def update_user_tenant(self, context, user_id, user):
        """Update the default tenant."""
        # ensure that we're a member of that tenant
        tenant_id = user.get('tenantId')
        self.identity_api.add_user_to_tenant(context, tenant_id, user_id)
        return self.update_user(context, user_id, user)


class RoleController(wsgi.Application):
    def __init__(self):
        self.identity_api = Manager()
        self.token_api = token.Manager()
        self.policy_api = policy.Manager()
        super(RoleController, self).__init__()

    # COMPAT(essex-3)
    def get_user_roles(self, context, user_id, tenant_id=None):
        """Get the roles for a user and tenant pair.

        Since we're trying to ignore the idea of user-only roles we're
        not implementing them in hopes that the idea will die off.

        """
        if tenant_id is None:
            raise exception.NotImplemented(
                message='User roles not supported: tenant ID required')

        user = self.identity_api.get_user(context, user_id)
        if user is None:
            raise exception.UserNotFound(user_id=user_id)
        tenant = self.identity_api.get_tenant(context, tenant_id)
        if tenant is None:
            raise exception.TenantNotFound(tenant_id=tenant_id)

        roles = self.identity_api.get_roles_for_user_and_tenant(
            context, user_id, tenant_id)
        return {'roles': [self.identity_api.get_role(context, x)
                          for x in roles]}

    # CRUD extension
    def get_role(self, context, role_id):
        self.assert_admin(context)
        role_ref = self.identity_api.get_role(context, role_id)
        if not role_ref:
            raise exception.RoleNotFound(role_id=role_id)
        return {'role': role_ref}

    def create_role(self, context, role):
        role = self._normalize_dict(role)
        self.assert_admin(context)
        role_id = uuid.uuid4().hex
        role['id'] = role_id
        role_ref = self.identity_api.create_role(context, role_id, role)
        return {'role': role_ref}

    def delete_role(self, context, role_id):
        self.assert_admin(context)
        self.get_role(context, role_id)
        self.identity_api.delete_role(context, role_id)

    def get_roles(self, context):
        self.assert_admin(context)
        roles = self.identity_api.list_roles(context)
        # TODO(termie): probably inefficient at some point
        return {'roles': roles}

    def add_role_to_user(self, context, user_id, role_id, tenant_id=None):
        """Add a role to a user and tenant pair.

        Since we're trying to ignore the idea of user-only roles we're
        not implementing them in hopes that the idea will die off.

        """
        self.assert_admin(context)
        if tenant_id is None:
            raise exception.NotImplemented(
                message='User roles not supported: tenant_id required')

        if self.identity_api.get_user(context, user_id) is None:
            raise exception.UserNotFound(user_id=user_id)
        if self.identity_api.get_tenant(context, tenant_id) is None:
            raise exception.TenantNotFound(tenant_id=tenant_id)
        if self.identity_api.get_role(context, role_id) is None:
            raise exception.RoleNotFound(role_id=role_id)

        # This still has the weird legacy semantics that adding a role to
        # a user also adds them to a tenant
        self.identity_api.add_user_to_tenant(context, tenant_id, user_id)
        self.identity_api.add_role_to_user_and_tenant(
            context, user_id, tenant_id, role_id)
        role_ref = self.identity_api.get_role(context, role_id)
        return {'role': role_ref}

    def remove_role_from_user(self, context, user_id, role_id,
                              tenant_id=None):
        """Remove a role from a user and tenant pair.

        Since we're trying to ignore the idea of user-only roles we're
        not implementing them in hopes that the idea will die off.

        """
        self.assert_admin(context)
        if tenant_id is None:
            raise exception.NotImplemented(
                message='User roles not supported: tenant_id required')

        if self.identity_api.get_user(context, user_id) is None:
            raise exception.UserNotFound(user_id=user_id)
        if self.identity_api.get_tenant(context, tenant_id) is None:
            raise exception.TenantNotFound(tenant_id=tenant_id)
        if self.identity_api.get_role(context, role_id) is None:
            raise exception.RoleNotFound(role_id=role_id)

        # This still has the weird legacy semantics that adding a role to
        # a user also adds them to a tenant, so we must follow up on that
        self.identity_api.remove_role_from_user_and_tenant(
            context, user_id, tenant_id, role_id)
        roles = self.identity_api.get_roles_for_user_and_tenant(
            context, user_id, tenant_id)
        if not roles:
            self.identity_api.remove_user_from_tenant(
                context, tenant_id, user_id)
        return

    # COMPAT(diablo): CRUD extension
    def get_role_refs(self, context, user_id):
        """Ultimate hack to get around having to make role_refs first-class.

        This will basically iterate over the various roles the user has in
        all tenants the user is a member of and create fake role_refs where
        the id encodes the user-tenant-role information so we can look
        up the appropriate data when we need to delete them.

        """
        self.assert_admin(context)
        user_ref = self.identity_api.get_user(context, user_id)
        tenant_ids = self.identity_api.get_tenants_for_user(context, user_id)
        o = []
        for tenant_id in tenant_ids:
            role_ids = self.identity_api.get_roles_for_user_and_tenant(
                context, user_id, tenant_id)
            for role_id in role_ids:
                ref = {'roleId': role_id,
                       'tenantId': tenant_id,
                       'userId': user_id}
                ref['id'] = urllib.urlencode(ref)
                o.append(ref)
        return {'roles': o}

    # COMPAT(diablo): CRUD extension
    def create_role_ref(self, context, user_id, role):
        """This is actually used for adding a user to a tenant.

        In the legacy data model adding a user to a tenant required setting
        a role.

        """
        self.assert_admin(context)
        # TODO(termie): for now we're ignoring the actual role
        tenant_id = role.get('tenantId')
        role_id = role.get('roleId')
        self.identity_api.add_user_to_tenant(context, tenant_id, user_id)
        self.identity_api.add_role_to_user_and_tenant(
            context, user_id, tenant_id, role_id)
        role_ref = self.identity_api.get_role(context, role_id)
        return {'role': role_ref}

    # COMPAT(diablo): CRUD extension
    def delete_role_ref(self, context, user_id, role_ref_id):
        """This is actually used for deleting a user from a tenant.

        In the legacy data model removing a user from a tenant required
        deleting a role.

        To emulate this, we encode the tenant and role in the role_ref_id,
        and if this happens to be the last role for the user-tenant pair,
        we remove the user from the tenant.

        """
        self.assert_admin(context)
        # TODO(termie): for now we're ignoring the actual role
        role_ref_ref = urlparse.parse_qs(role_ref_id)
        tenant_id = role_ref_ref.get('tenantId')[0]
        role_id = role_ref_ref.get('roleId')[0]
        self.identity_api.remove_role_from_user_and_tenant(
            context, user_id, tenant_id, role_id)
        roles = self.identity_api.get_roles_for_user_and_tenant(
            context, user_id, tenant_id)
        if not roles:
            self.identity_api.remove_user_from_tenant(
                context, tenant_id, user_id)
./CrossVul/dataset_final_sorted/CWE-264/py/good_3694_0
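The role_ref compatibility shims in the record above round-trip the user/tenant/role triple through a query string so the "id" can be decoded later. A standalone Python 2 illustration of that encode/decode cycle (the ids are made up for the example):

import urllib
import urlparse

ref = {'roleId': 'r1', 'tenantId': 't1', 'userId': 'u1'}
# encode the triple into a single opaque-looking id string
role_ref_id = urllib.urlencode(ref)

# decode it again, as delete_role_ref does
decoded = urlparse.parse_qs(role_ref_id)
print decoded['tenantId'][0]  # 't1'
print decoded['roleId'][0]    # 'r1'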
crossvul-python_data_good_1622_3
from binascii import hexlify
from configparser import RawConfigParser
import os
from io import StringIO
import stat
import subprocess
import sys
import shutil
import tempfile
import time
import unittest
from hashlib import sha256

from attic import xattr
from attic.archive import Archive, ChunkBuffer
from attic.archiver import Archiver
from attic.cache import Cache
from attic.crypto import bytes_to_long, num_aes_blocks
from attic.helpers import Manifest
from attic.remote import RemoteRepository, PathNotAllowed
from attic.repository import Repository
from attic.testsuite import AtticTestCase
from attic.testsuite.mock import patch

try:
    import llfuse
    has_llfuse = True
except ImportError:
    has_llfuse = False

has_lchflags = hasattr(os, 'lchflags')

src_dir = os.path.join(os.getcwd(), os.path.dirname(__file__), '..')


class changedir:
    def __init__(self, dir):
        self.dir = dir

    def __enter__(self):
        self.old = os.getcwd()
        os.chdir(self.dir)

    def __exit__(self, *args, **kw):
        os.chdir(self.old)


class environment_variable:
    def __init__(self, **values):
        self.values = values
        self.old_values = {}

    def __enter__(self):
        for k, v in self.values.items():
            self.old_values[k] = os.environ.get(k)
            os.environ[k] = v

    def __exit__(self, *args, **kw):
        for k, v in self.old_values.items():
            if v is not None:
                os.environ[k] = v


class ArchiverTestCaseBase(AtticTestCase):

    prefix = ''

    def setUp(self):
        os.environ['ATTIC_CHECK_I_KNOW_WHAT_I_AM_DOING'] = '1'
        self.archiver = Archiver()
        self.tmpdir = tempfile.mkdtemp()
        self.repository_path = os.path.join(self.tmpdir, 'repository')
        self.repository_location = self.prefix + self.repository_path
        self.input_path = os.path.join(self.tmpdir, 'input')
        self.output_path = os.path.join(self.tmpdir, 'output')
        self.keys_path = os.path.join(self.tmpdir, 'keys')
        self.cache_path = os.path.join(self.tmpdir, 'cache')
        self.exclude_file_path = os.path.join(self.tmpdir, 'excludes')
        os.environ['ATTIC_KEYS_DIR'] = self.keys_path
        os.environ['ATTIC_CACHE_DIR'] = self.cache_path
        os.mkdir(self.input_path)
        os.mkdir(self.output_path)
        os.mkdir(self.keys_path)
        os.mkdir(self.cache_path)
        with open(self.exclude_file_path, 'wb') as fd:
            fd.write(b'input/file2\n# A commment line, then a blank line\n\n')
        self._old_wd = os.getcwd()
        os.chdir(self.tmpdir)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        os.chdir(self._old_wd)

    def attic(self, *args, **kw):
        exit_code = kw.get('exit_code', 0)
        fork = kw.get('fork', False)
        if fork:
            try:
                output = subprocess.check_output((sys.executable, '-m', 'attic.archiver') + args)
                ret = 0
            except subprocess.CalledProcessError as e:
                output = e.output
                ret = e.returncode
            output = os.fsdecode(output)
            if ret != exit_code:
                print(output)
            self.assert_equal(exit_code, ret)
            return output
        args = list(args)
        stdout, stderr = sys.stdout, sys.stderr
        try:
            output = StringIO()
            sys.stdout = sys.stderr = output
            ret = self.archiver.run(args)
            sys.stdout, sys.stderr = stdout, stderr
            if ret != exit_code:
                print(output.getvalue())
            self.assert_equal(exit_code, ret)
            return output.getvalue()
        finally:
            sys.stdout, sys.stderr = stdout, stderr

    def create_src_archive(self, name):
        self.attic('create', self.repository_location + '::' + name, src_dir)


class ArchiverTestCase(ArchiverTestCaseBase):

    def create_regular_file(self, name, size=0, contents=None):
        filename = os.path.join(self.input_path, name)
        if not os.path.exists(os.path.dirname(filename)):
            os.makedirs(os.path.dirname(filename))
        with open(filename, 'wb') as fd:
            if contents is None:
                contents = b'X' * size
            fd.write(contents)

    def create_test_files(self):
        """Create a minimal test case including all supported file types
        """
        # File
        self.create_regular_file('empty', size=0)
        # 2600-01-01 > 2**64 ns
        os.utime('input/empty', (19880895600, 19880895600))
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('flagfile', size=1024)
        # Directory
        self.create_regular_file('dir2/file2', size=1024 * 80)
        # File owner
        os.chown('input/file1', 100, 200)
        # File mode
        os.chmod('input/file1', 0o7755)
        os.chmod('input/dir2', 0o555)
        # Block device
        os.mknod('input/bdev', 0o600 | stat.S_IFBLK, os.makedev(10, 20))
        # Char device
        os.mknod('input/cdev', 0o600 | stat.S_IFCHR, os.makedev(30, 40))
        # Hard link
        os.link(os.path.join(self.input_path, 'file1'),
                os.path.join(self.input_path, 'hardlink'))
        # Symlink
        os.symlink('somewhere', os.path.join(self.input_path, 'link1'))
        if xattr.is_enabled():
            xattr.setxattr(os.path.join(self.input_path, 'file1'), 'user.foo', b'bar')
            xattr.setxattr(os.path.join(self.input_path, 'link1'), 'user.foo_symlink', b'bar_symlink', follow_symlinks=False)
        # FIFO node
        os.mkfifo(os.path.join(self.input_path, 'fifo1'))
        if has_lchflags:
            os.lchflags(os.path.join(self.input_path, 'flagfile'), stat.UF_NODUMP)

    def test_basic_functionality(self):
        self.create_test_files()
        self.attic('init', self.repository_location)
        self.attic('create', self.repository_location + '::test', 'input')
        self.attic('create', self.repository_location + '::test.2', 'input')
        with changedir('output'):
            self.attic('extract', self.repository_location + '::test')
        self.assert_equal(len(self.attic('list', self.repository_location).splitlines()), 2)
        self.assert_equal(len(self.attic('list', self.repository_location + '::test').splitlines()), 11)
        self.assert_dirs_equal('input', 'output/input')
        info_output = self.attic('info', self.repository_location + '::test')
        self.assert_in('Number of files: 4', info_output)
        shutil.rmtree(self.cache_path)
        with environment_variable(ATTIC_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK='1'):
            info_output2 = self.attic('info', self.repository_location + '::test')
        # info_output2 starts with some "initializing cache" text but should
        # end the same way as info_output
        assert info_output2.endswith(info_output)

    def _extract_repository_id(self, path):
        return Repository(self.repository_path).id

    def _set_repository_id(self, path, id):
        config = RawConfigParser()
        config.read(os.path.join(path, 'config'))
        config.set('repository', 'id', hexlify(id).decode('ascii'))
        with open(os.path.join(path, 'config'), 'w') as fd:
            config.write(fd)
        return Repository(self.repository_path).id

    def test_repository_swap_detection(self):
        self.create_test_files()
        os.environ['ATTIC_PASSPHRASE'] = 'passphrase'
        self.attic('init', '--encryption=passphrase', self.repository_location)
        repository_id = self._extract_repository_id(self.repository_path)
        self.attic('create', self.repository_location + '::test', 'input')
        shutil.rmtree(self.repository_path)
        self.attic('init', '--encryption=none', self.repository_location)
        self._set_repository_id(self.repository_path, repository_id)
        self.assert_equal(repository_id, self._extract_repository_id(self.repository_path))
        self.assert_raises(Cache.EncryptionMethodMismatch, lambda: self.attic('create', self.repository_location + '::test.2', 'input'))

    def test_strip_components(self):
        self.attic('init', self.repository_location)
        self.create_regular_file('dir/file')
        self.attic('create', self.repository_location + '::test', 'input')
        with changedir('output'):
            self.attic('extract', self.repository_location + '::test', '--strip-components', '3')
            self.assert_true(not os.path.exists('file'))
            with self.assert_creates_file('file'):
                self.attic('extract', self.repository_location + '::test', '--strip-components', '2')
            with self.assert_creates_file('dir/file'):
                self.attic('extract', self.repository_location + '::test', '--strip-components', '1')
            with self.assert_creates_file('input/dir/file'):
                self.attic('extract', self.repository_location + '::test', '--strip-components', '0')

    def test_extract_include_exclude(self):
        self.attic('init', self.repository_location)
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('file2', size=1024 * 80)
        self.create_regular_file('file3', size=1024 * 80)
        self.create_regular_file('file4', size=1024 * 80)
        self.attic('create', '--exclude=input/file4', self.repository_location + '::test', 'input')
        with changedir('output'):
            self.attic('extract', self.repository_location + '::test', 'input/file1')
        self.assert_equal(sorted(os.listdir('output/input')), ['file1'])
        with changedir('output'):
            self.attic('extract', '--exclude=input/file2', self.repository_location + '::test')
        self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file3'])
        with changedir('output'):
            self.attic('extract', '--exclude-from=' + self.exclude_file_path, self.repository_location + '::test')
        self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file3'])

    def test_exclude_caches(self):
        self.attic('init', self.repository_location)
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('cache1/CACHEDIR.TAG', contents=b'Signature: 8a477f597d28d172789f06886806bc55 extra stuff')
        self.create_regular_file('cache2/CACHEDIR.TAG', contents=b'invalid signature')
        self.attic('create', '--exclude-caches', self.repository_location + '::test', 'input')
        with changedir('output'):
            self.attic('extract', self.repository_location + '::test')
        self.assert_equal(sorted(os.listdir('output/input')), ['cache2', 'file1'])
        self.assert_equal(sorted(os.listdir('output/input/cache2')), ['CACHEDIR.TAG'])

    def test_path_normalization(self):
        self.attic('init', self.repository_location)
        self.create_regular_file('dir1/dir2/file', size=1024 * 80)
        with changedir('input/dir1/dir2'):
            self.attic('create', self.repository_location + '::test', '../../../input/dir1/../dir1/dir2/..')
        output = self.attic('list', self.repository_location + '::test')
        self.assert_not_in('..', output)
        self.assert_in(' input/dir1/dir2/file', output)

    def test_repeated_files(self):
        self.create_regular_file('file1', size=1024 * 80)
        self.attic('init', self.repository_location)
        self.attic('create', self.repository_location + '::test', 'input', 'input')

    def test_overwrite(self):
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('dir2/file2', size=1024 * 80)
        self.attic('init', self.repository_location)
        self.attic('create', self.repository_location + '::test', 'input')
        # Overwriting regular files and directories should be supported
        os.mkdir('output/input')
        os.mkdir('output/input/file1')
        os.mkdir('output/input/dir2')
        with changedir('output'):
            self.attic('extract', self.repository_location + '::test')
        self.assert_dirs_equal('input', 'output/input')
        # But non-empty dirs should fail
        os.unlink('output/input/file1')
        os.mkdir('output/input/file1')
        os.mkdir('output/input/file1/dir')
        with changedir('output'):
            self.attic('extract', self.repository_location + '::test', exit_code=1)

    def test_delete(self):
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('dir2/file2', size=1024 * 80)
        self.attic('init', self.repository_location)
        self.attic('create', self.repository_location + '::test', 'input')
        self.attic('create', self.repository_location + '::test.2', 'input')
        self.attic('extract', '--dry-run', self.repository_location + '::test')
        self.attic('extract', '--dry-run', self.repository_location + '::test.2')
        self.attic('delete', self.repository_location + '::test')
        self.attic('extract', '--dry-run', self.repository_location + '::test.2')
        self.attic('delete', self.repository_location + '::test.2')
        # Make sure all data except the manifest has been deleted
        repository = Repository(self.repository_path)
        self.assert_equal(len(repository), 1)

    def test_corrupted_repository(self):
        self.attic('init', self.repository_location)
        self.create_src_archive('test')
        self.attic('extract', '--dry-run', self.repository_location + '::test')
        self.attic('check', self.repository_location)
        name = sorted(os.listdir(os.path.join(self.tmpdir, 'repository', 'data', '0')), reverse=True)[0]
        fd = open(os.path.join(self.tmpdir, 'repository', 'data', '0', name), 'r+')
        fd.seek(100)
        fd.write('XXXX')
        fd.close()
        self.attic('check', self.repository_location, exit_code=1)

    def test_readonly_repository(self):
        self.attic('init', self.repository_location)
        self.create_src_archive('test')
        os.system('chmod -R ugo-w ' + self.repository_path)
        try:
            self.attic('extract', '--dry-run', self.repository_location + '::test')
        finally:
            # Restore permissions so shutil.rmtree is able to delete it
            os.system('chmod -R u+w ' + self.repository_path)

    def test_cmdline_compatibility(self):
        self.create_regular_file('file1', size=1024 * 80)
        self.attic('init', self.repository_location)
        self.attic('create', self.repository_location + '::test', 'input')
        output = self.attic('verify', '-v', self.repository_location + '::test')
        self.assert_in('"attic verify" has been deprecated', output)
        output = self.attic('prune', self.repository_location, '--hourly=1')
        self.assert_in('"--hourly" has been deprecated. Use "--keep-hourly" instead', output)

    def test_prune_repository(self):
        self.attic('init', self.repository_location)
        self.attic('create', self.repository_location + '::test1', src_dir)
        self.attic('create', self.repository_location + '::test2', src_dir)
        output = self.attic('prune', '-v', '--dry-run', self.repository_location, '--keep-daily=2')
        self.assert_in('Keeping archive: test2', output)
        self.assert_in('Would prune: test1', output)
        output = self.attic('list', self.repository_location)
        self.assert_in('test1', output)
        self.assert_in('test2', output)
        self.attic('prune', self.repository_location, '--keep-daily=2')
        output = self.attic('list', self.repository_location)
        self.assert_not_in('test1', output)
        self.assert_in('test2', output)

    def test_usage(self):
        self.assert_raises(SystemExit, lambda: self.attic())
        self.assert_raises(SystemExit, lambda: self.attic('-h'))

    @unittest.skipUnless(has_llfuse, 'llfuse not installed')
    def test_fuse_mount_repository(self):
        mountpoint = os.path.join(self.tmpdir, 'mountpoint')
        os.mkdir(mountpoint)
        self.attic('init', self.repository_location)
        self.create_test_files()
        self.attic('create', self.repository_location + '::archive', 'input')
        self.attic('create', self.repository_location + '::archive2', 'input')
        try:
            self.attic('mount', self.repository_location, mountpoint, fork=True)
            self.wait_for_mount(mountpoint)
            self.assert_dirs_equal(self.input_path, os.path.join(mountpoint, 'archive', 'input'))
            self.assert_dirs_equal(self.input_path, os.path.join(mountpoint, 'archive2', 'input'))
        finally:
            if sys.platform.startswith('linux'):
                os.system('fusermount -u ' + mountpoint)
            else:
                os.system('umount ' + mountpoint)
            os.rmdir(mountpoint)
            # Give the daemon some time to exit
            time.sleep(.2)

    @unittest.skipUnless(has_llfuse, 'llfuse not installed')
    def test_fuse_mount_archive(self):
        mountpoint = os.path.join(self.tmpdir, 'mountpoint')
        os.mkdir(mountpoint)
        self.attic('init', self.repository_location)
        self.create_test_files()
        self.attic('create', self.repository_location + '::archive', 'input')
        try:
            self.attic('mount', self.repository_location + '::archive', mountpoint, fork=True)
            self.wait_for_mount(mountpoint)
            self.assert_dirs_equal(self.input_path, os.path.join(mountpoint, 'input'))
        finally:
            if sys.platform.startswith('linux'):
                os.system('fusermount -u ' + mountpoint)
            else:
                os.system('umount ' + mountpoint)
            os.rmdir(mountpoint)
            # Give the daemon some time to exit
            time.sleep(.2)

    def verify_aes_counter_uniqueness(self, method):
        seen = set()  # Chunks already seen
        used = set()  # counter values already used

        def verify_uniqueness():
            repository = Repository(self.repository_path)
            for key, _ in repository.open_index(repository.get_transaction_id()).iteritems():
                data = repository.get(key)
                hash = sha256(data).digest()
                if not hash in seen:
                    seen.add(hash)
                    num_blocks = num_aes_blocks(len(data) - 41)
                    nonce = bytes_to_long(data[33:41])
                    for counter in range(nonce, nonce + num_blocks):
                        self.assert_not_in(counter, used)
                        used.add(counter)

        self.create_test_files()
        os.environ['ATTIC_PASSPHRASE'] = 'passphrase'
        self.attic('init', '--encryption=' + method, self.repository_location)
        verify_uniqueness()
        self.attic('create', self.repository_location + '::test', 'input')
        verify_uniqueness()
        self.attic('create', self.repository_location + '::test.2', 'input')
        verify_uniqueness()
        self.attic('delete', self.repository_location + '::test.2')
        verify_uniqueness()
        self.assert_equal(used, set(range(len(used))))

    def test_aes_counter_uniqueness_keyfile(self):
        self.verify_aes_counter_uniqueness('keyfile')

    def test_aes_counter_uniqueness_passphrase(self):
        self.verify_aes_counter_uniqueness('passphrase')


class ArchiverCheckTestCase(ArchiverTestCaseBase):

    def setUp(self):
        super(ArchiverCheckTestCase, self).setUp()
        with patch.object(ChunkBuffer, 'BUFFER_SIZE', 10):
            self.attic('init', self.repository_location)
            self.create_src_archive('archive1')
            self.create_src_archive('archive2')

    def open_archive(self, name):
        repository = Repository(self.repository_path)
        manifest, key = Manifest.load(repository)
        archive = Archive(repository, key, manifest, name)
        return archive, repository

    def test_check_usage(self):
        output = self.attic('check', self.repository_location, exit_code=0)
        self.assert_in('Starting repository check', output)
        self.assert_in('Starting archive consistency check', output)
        output = self.attic('check', '--repository-only', self.repository_location, exit_code=0)
        self.assert_in('Starting repository check', output)
        self.assert_not_in('Starting archive consistency check', output)
        output = self.attic('check', '--archives-only', self.repository_location, exit_code=0)
        self.assert_not_in('Starting repository check', output)
        self.assert_in('Starting archive consistency check', output)

    def test_missing_file_chunk(self):
        archive, repository = self.open_archive('archive1')
        for item in archive.iter_items():
            if item[b'path'].endswith('testsuite/archiver.py'):
                repository.delete(item[b'chunks'][-1][0])
                break
        repository.commit()
        self.attic('check', self.repository_location, exit_code=1)
        self.attic('check', '--repair', self.repository_location, exit_code=0)
        self.attic('check', self.repository_location, exit_code=0)

    def test_missing_archive_item_chunk(self):
        archive, repository = self.open_archive('archive1')
        repository.delete(archive.metadata[b'items'][-5])
        repository.commit()
        self.attic('check', self.repository_location, exit_code=1)
        self.attic('check', '--repair', self.repository_location, exit_code=0)
        self.attic('check', self.repository_location, exit_code=0)

    def test_missing_archive_metadata(self):
        archive, repository = self.open_archive('archive1')
        repository.delete(archive.id)
        repository.commit()
        self.attic('check', self.repository_location, exit_code=1)
        self.attic('check', '--repair', self.repository_location, exit_code=0)
        self.attic('check', self.repository_location, exit_code=0)

    def test_missing_manifest(self):
        archive, repository = self.open_archive('archive1')
        repository.delete(Manifest.MANIFEST_ID)
        repository.commit()
        self.attic('check', self.repository_location, exit_code=1)
        output = self.attic('check', '--repair', self.repository_location, exit_code=0)
        self.assert_in('archive1', output)
        self.assert_in('archive2', output)
        self.attic('check', self.repository_location, exit_code=0)

    def test_extra_chunks(self):
        self.attic('check', self.repository_location, exit_code=0)
        repository = Repository(self.repository_location)
        repository.put(b'01234567890123456789012345678901', b'xxxx')
        repository.commit()
        repository.close()
        self.attic('check', self.repository_location, exit_code=1)
        self.attic('check', self.repository_location, exit_code=1)
        self.attic('check', '--repair', self.repository_location, exit_code=0)
        self.attic('check', self.repository_location, exit_code=0)
        self.attic('extract', '--dry-run', self.repository_location + '::archive1', exit_code=0)


class RemoteArchiverTestCase(ArchiverTestCase):
    prefix = '__testsuite__:'

    def test_remote_repo_restrict_to_path(self):
        self.attic('init', self.repository_location)
        path_prefix = os.path.dirname(self.repository_path)
        with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', '/foo']):
            self.assert_raises(PathNotAllowed, lambda: self.attic('init', self.repository_location + '_1'))
        with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', path_prefix]):
            self.attic('init', self.repository_location + '_2')
        with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', '/foo', '--restrict-to-path', path_prefix]):
            self.attic('init', self.repository_location + '_3')
./CrossVul/dataset_final_sorted/CWE-264/py/good_1622_3
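verify_aes_counter_uniqueness in the test record above asserts that no AES-CTR counter value is ever used twice across the repository's chunks. A minimal standalone sketch of that invariant follows; the counter ranges here are made up for illustration and no real crypto is involved.

used = set()


def claim_counters(nonce, num_blocks):
    # each chunk consumes the counters [nonce, nonce + num_blocks)
    for counter in range(nonce, nonce + num_blocks):
        assert counter not in used, "CTR counter %d reused!" % counter
        used.add(counter)


claim_counters(0, 4)        # chunk 1 uses counters 0..3
claim_counters(4, 2)        # chunk 2 uses counters 4..5
try:
    claim_counters(3, 1)    # would reuse counter 3
except AssertionError as exc:
    print(exc)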
crossvul-python_data_bad_5236_0
#
#  Limited command Shell (lshell)
#
#  Copyright (C) 2008-2013 Ignace Mouzannar (ghantoos) <ghantoos@ghantoos.org>
#
#  This file is part of lshell
#
#  This program is free software: you can redistribute it and/or modify
#  it under the terms of the GNU General Public License as published by
#  the Free Software Foundation, either version 3 of the License, or
#  (at your option) any later version.
#
#  This program is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with this program.  If not, see <http://www.gnu.org/licenses/>.

import sys
import re
import os

# import lshell specifics
from lshell import utils


def warn_count(messagetype, command, conf, strict=None, ssh=None):
    """ Update the warning_counter, log and display a warning to the user
    """
    log = conf['logpath']

    if not ssh:
        if strict:
            conf['warning_counter'] -= 1
            if conf['warning_counter'] < 0:
                log.critical('*** forbidden %s -> "%s"'
                             % (messagetype, command))
                log.critical('*** Kicked out')
                sys.exit(1)
            else:
                log.critical('*** forbidden %s -> "%s"'
                             % (messagetype, command))
                sys.stderr.write('*** You have %s warning(s) left,'
                                 ' before getting kicked out.\n'
                                 % conf['warning_counter'])
                log.error('*** User warned, counter: %s'
                          % conf['warning_counter'])
                sys.stderr.write('This incident has been reported.\n')
        else:
            if not conf['quiet']:
                log.critical('*** forbidden %s: %s' % (messagetype, command))

    # if you are here, means that you did something wrong. Return 1.
    return 1, conf


def check_path(line, conf, completion=None, ssh=None, strict=None):
    """ Check if a path is entered in the line. If so, it checks if user
    are allowed to see this path. If user is not allowed, it calls
    warn_count. In case of completion, it only returns 0 or 1.
    """
    allowed_path_re = str(conf['path'][0])
    denied_path_re = str(conf['path'][1][:-1])

    # split line depending on the operators
    sep = re.compile(r'\ |;|\||&')
    line = line.strip()
    line = sep.split(line)

    for item in line:
        # remove potential quotes or back-ticks
        item = re.sub(r'^["\'`]|["\'`]$', '', item)

        # remove potential $(), ${}, ``
        item = re.sub(r'^\$[\(\{]|[\)\}]$', '', item)

        # if item has been converted to something other than a string
        # or an int, reconvert it to a string
        if type(item) not in ['str', 'int']:
            item = str(item)

        # replace "~" with home path
        item = os.path.expanduser(item)

        # expand shell wildcards using "echo"
        # i know, this a bit nasty...
        if re.findall('\$|\*|\?', item):
            # remove quotes if available
            item = re.sub("\"|\'", "", item)
            import subprocess
            p = subprocess.Popen("`which echo` %s" % item,
                                 shell=True,
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            cout = p.stdout

            try:
                item = cout.readlines()[0].decode('utf8').split(' ')[0]
                item = item.strip()
                item = os.path.expandvars(item)
            except IndexError:
                conf['logpath'].critical('*** Internal error: command not '
                                         'executed')
                return 1, conf

        tomatch = os.path.realpath(item)
        if os.path.isdir(tomatch) and tomatch[-1] != '/':
            tomatch += '/'
        match_allowed = re.findall(allowed_path_re, tomatch)
        if denied_path_re:
            match_denied = re.findall(denied_path_re, tomatch)
        else:
            match_denied = None

        # if path not allowed
        # case path executed: warn, and return 1
        # case completion: return 1
        if not match_allowed or match_denied:
            if not completion:
                ret, conf = warn_count('path',
                                       tomatch,
                                       conf,
                                       strict=strict,
                                       ssh=ssh)
            return 1, conf

    if not completion:
        if not re.findall(allowed_path_re, os.getcwd() + '/'):
            ret, conf = warn_count('path',
                                   tomatch,
                                   conf,
                                   strict=strict,
                                   ssh=ssh)
            os.chdir(conf['home_path'])
            conf['promptprint'] = utils.updateprompt(os.getcwd(), conf)
            return 1, conf
    return 0, conf


def check_secure(line, conf, strict=None, ssh=None):
    """This method is used to check the content on the typed command.
    Its purpose is to forbid the user to user to override the lshell
    command restrictions.
    The forbidden characters are placed in the 'forbidden' variable.
    Feel free to update the list. Emptying it would be quite useless..: )

    A warning counter has been added, to kick out of lshell a user if he
    is warned more than X time (X being the 'warning_counter' variable).
    """

    # store original string
    oline = line

    # strip all spaces/tabs
    line = " ".join(line.split())

    # init return code
    returncode = 0

    # This logic is kept crudely simple on purpose.
    # At most we might match the same stanza twice
    # (for e.g. "'a'", 'a') but the converse would
    # require detecting single quotation stanzas
    # nested within double quotes and vice versa
    relist = re.findall(r'[^=]\"(.+)\"', line)
    relist2 = re.findall(r'[^=]\'(.+)\'', line)
    relist = relist + relist2
    for item in relist:
        if os.path.exists(item):
            ret_check_path, conf = check_path(item, conf, strict=strict)
            returncode += ret_check_path

    # ignore quoted text
    line = re.sub(r'\"(.+?)\"', '', line)
    line = re.sub(r'\'(.+?)\'', '', line)

    if re.findall('[:cntrl:].*\n', line):
        ret, conf = warn_count('syntax',
                               oline,
                               conf,
                               strict=strict,
                               ssh=ssh)
        return ret, conf

    for item in conf['forbidden']:
        # allow '&&' and '||' even if singles are forbidden
        if item in ['&', '|']:
            if re.findall("[^\%s]\%s[^\%s]" % (item, item, item), line):
                ret, conf = warn_count('syntax',
                                       oline,
                                       conf,
                                       strict=strict,
                                       ssh=ssh)
                return ret, conf
        else:
            if item in line:
                ret, conf = warn_count('syntax',
                                       oline,
                                       conf,
                                       strict=strict,
                                       ssh=ssh)
                return ret, conf

    # check if the line contains $(foo) executions, and check them
    executions = re.findall('\$\([^)]+[)]', line)
    for item in executions:
        # recurse on check_path
        ret_check_path, conf = check_path(item[2:-1].strip(),
                                          conf,
                                          strict=strict)
        returncode += ret_check_path

        # recurse on check_secure
        ret_check_secure, conf = check_secure(item[2:-1].strip(),
                                              conf,
                                              strict=strict)
        returncode += ret_check_secure

    # check for executions using back quotes '`'
    executions = re.findall('\`[^`]+[`]', line)
    for item in executions:
        ret_check_secure, conf = check_secure(item[1:-1].strip(),
                                              conf,
                                              strict=strict)
        returncode += ret_check_secure

    # check if the line contains ${foo=bar}, and check them
    curly = re.findall('\$\{[^}]+[}]', line)
    for item in curly:
        # split to get variable only, and remove last character "}"
        if re.findall(r'=|\+|\?|\-', item):
            variable = re.split('=|\+|\?|\-', item, 1)
        else:
            variable = item
        ret_check_path, conf = check_path(variable[1][:-1],
                                          conf,
                                          strict=strict)
        returncode += ret_check_path

    # if unknown commands where found, return 1 and don't execute the line
    if returncode > 0:
        return 1, conf
    # in case the $(foo) or `foo` command passed the above tests
    elif line.startswith('$(') or line.startswith('`'):
        return 0, conf

    # in case ';', '|' or '&' are not forbidden, check if in line
    lines = []

    # corrected by Alojzij Blatnik #48
    # test first character
    if line[0] in ["&", "|", ";"]:
        start = 1
    else:
        start = 0

    # split remaining command line
    for i in range(1, len(line)):
        # in case \& or \| or \; don't split it
        if line[i] in ["&", "|", ";"] and line[i - 1] != "\\":
            # if there is more && or || skip it
            if start != i:
                lines.append(line[start:i])
            start = i + 1

    # append remaining command line
    if start != len(line):
        lines.append(line[start:len(line)])

    # remove trailing parenthesis
    line = re.sub('\)$', '', line)
    for separate_line in lines:
        separate_line = " ".join(separate_line.split())
        splitcmd = separate_line.strip().split(' ')
        command = splitcmd[0]
        if len(splitcmd) > 1:
            cmdargs = splitcmd
        else:
            cmdargs = None

        # in case of a sudo command, check in sudo_commands list if allowed
        if command == 'sudo':
            if type(cmdargs) == list:
                # allow the -u (user) flag
                if cmdargs[1] == '-u' and cmdargs:
                    sudocmd = cmdargs[3]
                else:
                    sudocmd = cmdargs[1]
                if sudocmd not in conf['sudo_commands'] and cmdargs:
                    ret, conf = warn_count('sudo command',
                                           oline,
                                           conf,
                                           strict=strict,
                                           ssh=ssh)
                    return ret, conf

        # if over SSH, replaced allowed list with the one of overssh
        if ssh:
            conf['allowed'] = conf['overssh']

        # for all other commands check in allowed list
        if command not in conf['allowed'] and command:
            ret, conf = warn_count('command',
                                   command,
                                   conf,
                                   strict=strict,
                                   ssh=ssh)
            return ret, conf
    return 0, conf
./CrossVul/dataset_final_sorted/CWE-264/py/bad_5236_0
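The forbidden-character loop in check_secure above tolerates doubled operators ('&&', '||') even when the single character is on the forbidden list, by requiring a lone operator surrounded by non-operator characters. A standalone demo of that regex special case, mirroring the pattern used in the record:

import re


def single_operator_present(item, line):
    # same pattern as check_secure: a lone '&' or '|' not part of a pair
    return bool(re.findall("[^\%s]\%s[^\%s]" % (item, item, item), line))


print(single_operator_present('&', 'ls && pwd'))   # False: '&&' is a pair
print(single_operator_present('&', 'ls & pwd'))    # True: lone '&' is flagged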
crossvul-python_data_bad_3634_4
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Implementation of SQLAlchemy backend."""

import datetime
import functools
import re
import warnings

from nova import block_device
from nova import db
from nova import exception
from nova import flags
from nova import utils
from nova import log as logging
from nova.compute import aggregate_states
from nova.compute import vm_states
from nova.db.sqlalchemy import models
from nova.db.sqlalchemy.session import get_session
from sqlalchemy import and_
from sqlalchemy import or_
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import joinedload_all
from sqlalchemy.sql import func
from sqlalchemy.sql.expression import asc
from sqlalchemy.sql.expression import desc
from sqlalchemy.sql.expression import literal_column

FLAGS = flags.FLAGS
flags.DECLARE('reserved_host_disk_mb', 'nova.scheduler.host_manager')
flags.DECLARE('reserved_host_memory_mb', 'nova.scheduler.host_manager')

LOG = logging.getLogger(__name__)


def is_admin_context(context):
    """Indicates if the request context is an administrator."""
    if not context:
        warnings.warn(_('Use of empty request context is deprecated'),
                      DeprecationWarning)
        raise Exception('die')
    return context.is_admin


def is_user_context(context):
    """Indicates if the request context is a normal user."""
    if not context:
        return False
    if context.is_admin:
        return False
    if not context.user_id or not context.project_id:
        return False
    return True


def authorize_project_context(context, project_id):
    """Ensures a request has permission to access the given project."""
    if is_user_context(context):
        if not context.project_id:
            raise exception.NotAuthorized()
        elif context.project_id != project_id:
            raise exception.NotAuthorized()


def authorize_user_context(context, user_id):
    """Ensures a request has permission to access the given user."""
    if is_user_context(context):
        if not context.user_id:
            raise exception.NotAuthorized()
        elif context.user_id != user_id:
            raise exception.NotAuthorized()


def require_admin_context(f):
    """Decorator to require admin request context.

    The first argument to the wrapped function must be the context.

    """

    def wrapper(*args, **kwargs):
        if not is_admin_context(args[0]):
            raise exception.AdminRequired()
        return f(*args, **kwargs)
    return wrapper


def require_context(f):
    """Decorator to require *any* user or admin context.

    This does no authorization for user or project access matching, see
    :py:func:`authorize_project_context` and
    :py:func:`authorize_user_context`.

    The first argument to the wrapped function must be the context.

    """

    def wrapper(*args, **kwargs):
        if not is_admin_context(args[0]) and not is_user_context(args[0]):
            raise exception.NotAuthorized()
        return f(*args, **kwargs)
    return wrapper


def require_instance_exists(f):
    """Decorator to require the specified instance to exist.

    Requires the wrapped function to use context and instance_id as
    their first two arguments.
    """

    def wrapper(context, instance_id, *args, **kwargs):
        db.instance_get(context, instance_id)
        return f(context, instance_id, *args, **kwargs)
    wrapper.__name__ = f.__name__
    return wrapper


def require_volume_exists(f):
    """Decorator to require the specified volume to exist.

    Requires the wrapped function to use context and volume_id as
    their first two arguments.
    """

    def wrapper(context, volume_id, *args, **kwargs):
        db.volume_get(context, volume_id)
        return f(context, volume_id, *args, **kwargs)
    wrapper.__name__ = f.__name__
    return wrapper


def require_aggregate_exists(f):
    """Decorator to require the specified aggregate to exist.

    Requires the wrapped function to use context and aggregate_id as
    their first two arguments.
    """

    @functools.wraps(f)
    def wrapper(context, aggregate_id, *args, **kwargs):
        db.aggregate_get(context, aggregate_id)
        return f(context, aggregate_id, *args, **kwargs)
    return wrapper


def model_query(context, *args, **kwargs):
    """Query helper that accounts for context's `read_deleted` field.

    :param context: context to query under
    :param session: if present, the session to use
    :param read_deleted: if present, overrides context's read_deleted field.
    :param project_only: if present and context is user-type, then restrict
            query to match the context's project_id.
    """
    session = kwargs.get('session') or get_session()
    read_deleted = kwargs.get('read_deleted') or context.read_deleted
    project_only = kwargs.get('project_only')

    query = session.query(*args)

    if read_deleted == 'no':
        query = query.filter_by(deleted=False)
    elif read_deleted == 'yes':
        pass  # omit the filter to include deleted and active
    elif read_deleted == 'only':
        query = query.filter_by(deleted=True)
    else:
        raise Exception(
                _("Unrecognized read_deleted value '%s'") % read_deleted)

    if project_only and is_user_context(context):
        query = query.filter_by(project_id=context.project_id)

    return query


def exact_filter(query, model, filters, legal_keys):
    """Applies exact match filtering to a query.

    Returns the updated query.  Modifies filters argument to remove
    filters consumed.

    :param query: query to apply filters to
    :param model: model object the query applies to, for IN-style
                  filtering
    :param filters: dictionary of filters; values that are lists,
                    tuples, sets, or frozensets cause an 'IN' test to
                    be performed, while exact matching ('==' operator)
                    is used for other values
    :param legal_keys: list of keys to apply exact filtering to
    """

    filter_dict = {}

    # Walk through all the keys
    for key in legal_keys:
        # Skip ones we're not filtering on
        if key not in filters:
            continue

        # OK, filtering on this key; what value do we search for?
        value = filters.pop(key)

        if isinstance(value, (list, tuple, set, frozenset)):
            # Looking for values in a list; apply to query directly
            column_attr = getattr(model, key)
            query = query.filter(column_attr.in_(value))
        else:
            # OK, simple exact match; save for later
            filter_dict[key] = value

    # Apply simple exact matches
    if filter_dict:
        query = query.filter_by(**filter_dict)

    return query


###################


@require_admin_context
def service_destroy(context, service_id):
    session = get_session()
    with session.begin():
        service_ref = service_get(context, service_id, session=session)
        service_ref.delete(session=session)

        if service_ref.topic == 'compute' and service_ref.compute_node:
            for c in service_ref.compute_node:
                c.delete(session=session)


@require_admin_context
def service_get(context, service_id, session=None):
    result = model_query(context, models.Service, session=session).\
                     options(joinedload('compute_node')).\
                     filter_by(id=service_id).\
                     first()
    if not result:
        raise exception.ServiceNotFound(service_id=service_id)

    return result


@require_admin_context
def service_get_all(context, disabled=None):
    query = model_query(context, models.Service)

    if disabled is not None:
        query = query.filter_by(disabled=disabled)

    return query.all()


@require_admin_context
def service_get_all_by_topic(context, topic):
    return model_query(context, models.Service, read_deleted="no").\
                filter_by(disabled=False).\
                filter_by(topic=topic).\
                all()


@require_admin_context
def service_get_by_host_and_topic(context, host, topic):
    return model_query(context, models.Service, read_deleted="no").\
                filter_by(disabled=False).\
                filter_by(host=host).\
                filter_by(topic=topic).\
                first()


@require_admin_context
def service_get_all_by_host(context, host):
    return model_query(context, models.Service, read_deleted="no").\
                filter_by(host=host).\
                all()


@require_admin_context
def service_get_all_compute_by_host(context, host):
    result = model_query(context, models.Service, read_deleted="no").\
                options(joinedload('compute_node')).\
                filter_by(host=host).\
                filter_by(topic="compute").\
                all()

    if not result:
        raise exception.ComputeHostNotFound(host=host)

    return result


@require_admin_context
def _service_get_all_topic_subquery(context, session, topic, subq, label):
    sort_value = getattr(subq.c, label)
    return model_query(context, models.Service,
                       func.coalesce(sort_value, 0),
                       session=session, read_deleted="no").\
                filter_by(topic=topic).\
                filter_by(disabled=False).\
                outerjoin((subq, models.Service.host == subq.c.host)).\
                order_by(sort_value).\
                all()


@require_admin_context
def service_get_all_compute_sorted(context):
    session = get_session()
    with session.begin():
        # NOTE(vish): The intended query is below
        #             SELECT services.*, COALESCE(inst_cores.instance_cores,
        #                                         0)
        #             FROM services LEFT OUTER JOIN
        #             (SELECT host, SUM(instances.vcpus) AS instance_cores
        #              FROM instances GROUP BY host) AS inst_cores
        #             ON services.host = inst_cores.host
        topic = 'compute'
        label = 'instance_cores'
        subq = model_query(context, models.Instance.host,
                           func.sum(models.Instance.vcpus).label(label),
                           session=session, read_deleted="no").\
                       group_by(models.Instance.host).\
                       subquery()
        return _service_get_all_topic_subquery(context,
                                               session,
                                               topic,
                                               subq,
                                               label)


@require_admin_context
def service_get_all_volume_sorted(context):
    session = get_session()
    with session.begin():
        topic = 'volume'
        label = 'volume_gigabytes'
        subq = model_query(context, models.Volume.host,
                           func.sum(models.Volume.size).label(label),
                           session=session, read_deleted="no").\
                       group_by(models.Volume.host).\
                       subquery()
        return
_service_get_all_topic_subquery(context, session, topic, subq, label) @require_admin_context def service_get_by_args(context, host, binary): result = model_query(context, models.Service).\ filter_by(host=host).\ filter_by(binary=binary).\ first() if not result: raise exception.HostBinaryNotFound(host=host, binary=binary) return result @require_admin_context def service_create(context, values): service_ref = models.Service() service_ref.update(values) if not FLAGS.enable_new_services: service_ref.disabled = True service_ref.save() return service_ref @require_admin_context def service_update(context, service_id, values): session = get_session() with session.begin(): service_ref = service_get(context, service_id, session=session) service_ref.update(values) service_ref.save(session=session) ################### @require_admin_context def compute_node_get(context, compute_id, session=None): result = model_query(context, models.ComputeNode, session=session).\ filter_by(id=compute_id).\ first() if not result: raise exception.ComputeHostNotFound(host=compute_id) return result @require_admin_context def compute_node_get_all(context, session=None): return model_query(context, models.ComputeNode, session=session).\ options(joinedload('service')).\ all() def _get_host_utilization(context, host, ram_mb, disk_gb): """Compute the current utilization of a given host.""" instances = instance_get_all_by_host(context, host) vms = len(instances) free_ram_mb = ram_mb - FLAGS.reserved_host_memory_mb free_disk_gb = disk_gb - (FLAGS.reserved_host_disk_mb * 1024) work = 0 for instance in instances: free_ram_mb -= instance.memory_mb free_disk_gb -= instance.root_gb free_disk_gb -= instance.ephemeral_gb if instance.vm_state in [vm_states.BUILDING, vm_states.REBUILDING, vm_states.MIGRATING, vm_states.RESIZING]: work += 1 return dict(free_ram_mb=free_ram_mb, free_disk_gb=free_disk_gb, current_workload=work, running_vms=vms) def _adjust_compute_node_values_for_utilization(context, values, session): service_ref = service_get(context, values['service_id'], session=session) host = service_ref['host'] ram_mb = values['memory_mb'] disk_gb = values['local_gb'] values.update(_get_host_utilization(context, host, ram_mb, disk_gb)) @require_admin_context def compute_node_create(context, values, session=None): """Creates a new ComputeNode and populates the capacity fields with the most recent data.""" if not session: session = get_session() _adjust_compute_node_values_for_utilization(context, values, session) with session.begin(subtransactions=True): compute_node_ref = models.ComputeNode() session.add(compute_node_ref) compute_node_ref.update(values) return compute_node_ref @require_admin_context def compute_node_update(context, compute_id, values, auto_adjust): """Creates a new ComputeNode and populates the capacity fields with the most recent data.""" session = get_session() if auto_adjust: _adjust_compute_node_values_for_utilization(context, values, session) with session.begin(subtransactions=True): compute_ref = compute_node_get(context, compute_id, session=session) compute_ref.update(values) compute_ref.save(session=session) def compute_node_get_by_host(context, host): """Get all capacity entries for the given host.""" session = get_session() with session.begin(): node = session.query(models.ComputeNode).\ options(joinedload('service')).\ filter(models.Service.host == host).\ filter_by(deleted=False) return node.first() def compute_node_utilization_update(context, host, free_ram_mb_delta=0, free_disk_gb_delta=0, work_delta=0, 
vm_delta=0): """Update a specific ComputeNode entry by a series of deltas. Do this as a single atomic action and lock the row for the duration of the operation. Requires that ComputeNode record exist.""" session = get_session() compute_node = None with session.begin(subtransactions=True): compute_node = session.query(models.ComputeNode).\ options(joinedload('service')).\ filter(models.Service.host == host).\ filter_by(deleted=False).\ with_lockmode('update').\ first() if compute_node is None: raise exception.NotFound(_("No ComputeNode for %(host)s") % locals()) # This table thingy is how we get atomic UPDATE x = x + 1 # semantics. table = models.ComputeNode.__table__ if free_ram_mb_delta != 0: compute_node.free_ram_mb = table.c.free_ram_mb + free_ram_mb_delta if free_disk_gb_delta != 0: compute_node.free_disk_gb = (table.c.free_disk_gb + free_disk_gb_delta) if work_delta != 0: compute_node.current_workload = (table.c.current_workload + work_delta) if vm_delta != 0: compute_node.running_vms = table.c.running_vms + vm_delta return compute_node def compute_node_utilization_set(context, host, free_ram_mb=None, free_disk_gb=None, work=None, vms=None): """Like compute_node_utilization_update() modify a specific host entry. But this function will set the metrics absolutely (vs. a delta update). """ session = get_session() compute_node = None with session.begin(subtransactions=True): compute_node = session.query(models.ComputeNode).\ options(joinedload('service')).\ filter(models.Service.host == host).\ filter_by(deleted=False).\ with_lockmode('update').\ first() if compute_node is None: raise exception.NotFound(_("No ComputeNode for %(host)s") % locals()) if free_ram_mb != None: compute_node.free_ram_mb = free_ram_mb if free_disk_gb != None: compute_node.free_disk_gb = free_disk_gb if work != None: compute_node.current_workload = work if vms != None: compute_node.running_vms = vms return compute_node ################### @require_admin_context def certificate_get(context, certificate_id, session=None): result = model_query(context, models.Certificate, session=session).\ filter_by(id=certificate_id).\ first() if not result: raise exception.CertificateNotFound(certificate_id=certificate_id) return result @require_admin_context def certificate_create(context, values): certificate_ref = models.Certificate() for (key, value) in values.iteritems(): certificate_ref[key] = value certificate_ref.save() return certificate_ref @require_admin_context def certificate_get_all_by_project(context, project_id): return model_query(context, models.Certificate, read_deleted="no").\ filter_by(project_id=project_id).\ all() @require_admin_context def certificate_get_all_by_user(context, user_id): return model_query(context, models.Certificate, read_deleted="no").\ filter_by(user_id=user_id).\ all() @require_admin_context def certificate_get_all_by_user_and_project(context, user_id, project_id): return model_query(context, models.Certificate, read_deleted="no").\ filter_by(user_id=user_id).\ filter_by(project_id=project_id).\ all() ################### @require_context def floating_ip_get(context, id): result = model_query(context, models.FloatingIp, project_only=True).\ filter_by(id=id).\ first() if not result: raise exception.FloatingIpNotFound(id=id) return result @require_context def floating_ip_get_pools(context): session = get_session() pools = [] for result in session.query(models.FloatingIp.pool).distinct(): pools.append({'name': result[0]}) return pools @require_context def floating_ip_allocate_address(context, 
project_id, pool): authorize_project_context(context, project_id) session = get_session() with session.begin(): floating_ip_ref = model_query(context, models.FloatingIp, session=session, read_deleted="no").\ filter_by(fixed_ip_id=None).\ filter_by(project_id=None).\ filter_by(pool=pool).\ with_lockmode('update').\ first() # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues if not floating_ip_ref: raise exception.NoMoreFloatingIps() floating_ip_ref['project_id'] = project_id session.add(floating_ip_ref) return floating_ip_ref['address'] @require_context def floating_ip_create(context, values): floating_ip_ref = models.FloatingIp() floating_ip_ref.update(values) floating_ip_ref.save() return floating_ip_ref['address'] @require_context def floating_ip_count_by_project(context, project_id): authorize_project_context(context, project_id) # TODO(tr3buchet): why leave auto_assigned floating IPs out? return model_query(context, models.FloatingIp, read_deleted="no").\ filter_by(project_id=project_id).\ filter_by(auto_assigned=False).\ count() @require_context def floating_ip_fixed_ip_associate(context, floating_address, fixed_address, host): session = get_session() with session.begin(): floating_ip_ref = floating_ip_get_by_address(context, floating_address, session=session) fixed_ip_ref = fixed_ip_get_by_address(context, fixed_address, session=session) floating_ip_ref.fixed_ip_id = fixed_ip_ref["id"] floating_ip_ref.host = host floating_ip_ref.save(session=session) @require_context def floating_ip_deallocate(context, address): session = get_session() with session.begin(): floating_ip_ref = floating_ip_get_by_address(context, address, session=session) floating_ip_ref['project_id'] = None floating_ip_ref['host'] = None floating_ip_ref['auto_assigned'] = False floating_ip_ref.save(session=session) @require_context def floating_ip_destroy(context, address): session = get_session() with session.begin(): floating_ip_ref = floating_ip_get_by_address(context, address, session=session) floating_ip_ref.delete(session=session) @require_context def floating_ip_disassociate(context, address): session = get_session() with session.begin(): floating_ip_ref = floating_ip_get_by_address(context, address, session=session) fixed_ip_ref = fixed_ip_get(context, floating_ip_ref['fixed_ip_id']) if fixed_ip_ref: fixed_ip_address = fixed_ip_ref['address'] else: fixed_ip_address = None floating_ip_ref.fixed_ip_id = None floating_ip_ref.host = None floating_ip_ref.save(session=session) return fixed_ip_address @require_context def floating_ip_set_auto_assigned(context, address): session = get_session() with session.begin(): floating_ip_ref = floating_ip_get_by_address(context, address, session=session) floating_ip_ref.auto_assigned = True floating_ip_ref.save(session=session) def _floating_ip_get_all(context): return model_query(context, models.FloatingIp, read_deleted="no") @require_admin_context def floating_ip_get_all(context): floating_ip_refs = _floating_ip_get_all(context).all() if not floating_ip_refs: raise exception.NoFloatingIpsDefined() return floating_ip_refs @require_admin_context def floating_ip_get_all_by_host(context, host): floating_ip_refs = _floating_ip_get_all(context).\ filter_by(host=host).\ all() if not floating_ip_refs: raise exception.FloatingIpNotFoundForHost(host=host) return floating_ip_refs @require_context def floating_ip_get_all_by_project(context, project_id): authorize_project_context(context, project_id) # TODO(tr3buchet): why do we not want 
auto_assigned floating IPs here? return _floating_ip_get_all(context).\ filter_by(project_id=project_id).\ filter_by(auto_assigned=False).\ all() @require_context def floating_ip_get_by_address(context, address, session=None): result = model_query(context, models.FloatingIp, session=session).\ filter_by(address=address).\ first() if not result: raise exception.FloatingIpNotFoundForAddress(address=address) # If the floating IP has a project ID set, check to make sure # the non-admin user has access. if result.project_id and is_user_context(context): authorize_project_context(context, result.project_id) return result @require_context def floating_ip_get_by_fixed_address(context, fixed_address, session=None): if not session: session = get_session() fixed_ip = fixed_ip_get_by_address(context, fixed_address, session) fixed_ip_id = fixed_ip['id'] return model_query(context, models.FloatingIp, session=session).\ filter_by(fixed_ip_id=fixed_ip_id).\ all() # NOTE(tr3buchet) please don't invent an exception here, empty list is fine @require_context def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id, session=None): if not session: session = get_session() return model_query(context, models.FloatingIp, session=session).\ filter_by(fixed_ip_id=fixed_ip_id).\ all() @require_context def floating_ip_update(context, address, values): session = get_session() with session.begin(): floating_ip_ref = floating_ip_get_by_address(context, address, session) for (key, value) in values.iteritems(): floating_ip_ref[key] = value floating_ip_ref.save(session=session) @require_context def _dnsdomain_get(context, session, fqdomain): return model_query(context, models.DNSDomain, session=session, read_deleted="no").\ filter_by(domain=fqdomain).\ with_lockmode('update').\ first() @require_context def dnsdomain_get(context, fqdomain): session = get_session() with session.begin(): return _dnsdomain_get(context, session, fqdomain) @require_admin_context def _dnsdomain_get_or_create(context, session, fqdomain): domain_ref = _dnsdomain_get(context, session, fqdomain) if not domain_ref: dns_ref = models.DNSDomain() dns_ref.update({'domain': fqdomain, 'availability_zone': None, 'project_id': None}) return dns_ref return domain_ref @require_admin_context def dnsdomain_register_for_zone(context, fqdomain, zone): session = get_session() with session.begin(): domain_ref = _dnsdomain_get_or_create(context, session, fqdomain) domain_ref.scope = 'private' domain_ref.availability_zone = zone domain_ref.save(session=session) @require_admin_context def dnsdomain_register_for_project(context, fqdomain, project): session = get_session() with session.begin(): domain_ref = _dnsdomain_get_or_create(context, session, fqdomain) domain_ref.scope = 'public' domain_ref.project_id = project domain_ref.save(session=session) @require_admin_context def dnsdomain_unregister(context, fqdomain): session = get_session() with session.begin(): session.query(models.DNSDomain).\ filter_by(domain=fqdomain).\ delete() @require_context def dnsdomain_list(context): session = get_session() records = model_query(context, models.DNSDomain, session=session, read_deleted="no").\ with_lockmode('update').all() domains = [] for record in records: domains.append(record.domain) return domains ################### @require_admin_context def fixed_ip_associate(context, address, instance_id, network_id=None, reserved=False): """Keyword arguments: reserved -- should be a boolean value(True or False), exact value will be used to filter on the fixed ip address """ session = 
get_session() with session.begin(): network_or_none = or_(models.FixedIp.network_id == network_id, models.FixedIp.network_id == None) fixed_ip_ref = model_query(context, models.FixedIp, session=session, read_deleted="no").\ filter(network_or_none).\ filter_by(reserved=reserved).\ filter_by(address=address).\ with_lockmode('update').\ first() # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues if fixed_ip_ref is None: raise exception.FixedIpNotFoundForNetwork(address=address, network_id=network_id) if fixed_ip_ref.instance_id: raise exception.FixedIpAlreadyInUse(address=address) if not fixed_ip_ref.network_id: fixed_ip_ref.network_id = network_id fixed_ip_ref.instance_id = instance_id session.add(fixed_ip_ref) return fixed_ip_ref['address'] @require_admin_context def fixed_ip_associate_pool(context, network_id, instance_id=None, host=None): session = get_session() with session.begin(): network_or_none = or_(models.FixedIp.network_id == network_id, models.FixedIp.network_id == None) fixed_ip_ref = model_query(context, models.FixedIp, session=session, read_deleted="no").\ filter(network_or_none).\ filter_by(reserved=False).\ filter_by(instance_id=None).\ filter_by(host=None).\ with_lockmode('update').\ first() # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues if not fixed_ip_ref: raise exception.NoMoreFixedIps() if fixed_ip_ref['network_id'] is None: fixed_ip_ref['network'] = network_id if instance_id: fixed_ip_ref['instance_id'] = instance_id if host: fixed_ip_ref['host'] = host session.add(fixed_ip_ref) return fixed_ip_ref['address'] @require_context def fixed_ip_create(context, values): fixed_ip_ref = models.FixedIp() fixed_ip_ref.update(values) fixed_ip_ref.save() return fixed_ip_ref['address'] @require_context def fixed_ip_bulk_create(context, ips): session = get_session() with session.begin(): for ip in ips: model = models.FixedIp() model.update(ip) session.add(model) @require_context def fixed_ip_disassociate(context, address): session = get_session() with session.begin(): fixed_ip_ref = fixed_ip_get_by_address(context, address, session=session) fixed_ip_ref['instance_id'] = None fixed_ip_ref.save(session=session) @require_admin_context def fixed_ip_disassociate_all_by_timeout(context, host, time): session = get_session() # NOTE(vish): only update fixed ips that "belong" to this # host; i.e. the network host or the instance # host matches. Two queries necessary because # join with update doesn't work. 
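    # The matching ids are collected first with a read-only join; the
    # bulk UPDATE below then targets those ids directly, using
    # synchronize_session='fetch' so the session stays consistent with
    # rows changed behind its back.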
host_filter = or_(and_(models.Instance.host == host, models.Network.multi_host == True), models.Network.host == host) result = session.query(models.FixedIp.id).\ filter(models.FixedIp.deleted == False).\ filter(models.FixedIp.allocated == False).\ filter(models.FixedIp.updated_at < time).\ join((models.Network, models.Network.id == models.FixedIp.network_id)).\ join((models.Instance, models.Instance.id == models.FixedIp.instance_id)).\ filter(host_filter).\ all() fixed_ip_ids = [fip[0] for fip in result] if not fixed_ip_ids: return 0 result = model_query(context, models.FixedIp, session=session).\ filter(models.FixedIp.id.in_(fixed_ip_ids)).\ update({'instance_id': None, 'leased': False, 'updated_at': utils.utcnow()}, synchronize_session='fetch') return result @require_context def fixed_ip_get(context, id, session=None): result = model_query(context, models.FixedIp, session=session).\ filter_by(id=id).\ first() if not result: raise exception.FixedIpNotFound(id=id) # FIXME(sirp): shouldn't we just use project_only here to restrict the # results? if is_user_context(context) and result['instance_id'] is not None: instance = instance_get(context, result['instance_id'], session) authorize_project_context(context, instance.project_id) return result @require_admin_context def fixed_ip_get_all(context, session=None): result = model_query(context, models.FixedIp, session=session, read_deleted="yes").\ all() if not result: raise exception.NoFixedIpsDefined() return result @require_context def fixed_ip_get_by_address(context, address, session=None): result = model_query(context, models.FixedIp, session=session, read_deleted="yes").\ filter_by(address=address).\ first() if not result: raise exception.FixedIpNotFoundForAddress(address=address) # NOTE(sirp): shouldn't we just use project_only here to restrict the # results? if is_user_context(context) and result['instance_id'] is not None: instance = instance_get(context, result['instance_id'], session) authorize_project_context(context, instance.project_id) return result @require_context def fixed_ip_get_by_instance(context, instance_id): result = model_query(context, models.FixedIp, read_deleted="no").\ filter_by(instance_id=instance_id).\ all() if not result: raise exception.FixedIpNotFoundForInstance(instance_id=instance_id) return result @require_context def fixed_ip_get_by_network_host(context, network_id, host): result = model_query(context, models.FixedIp, read_deleted="no").\ filter_by(network_id=network_id).\ filter_by(host=host).\ first() if not result: raise exception.FixedIpNotFoundForNetworkHost(network_id=network_id, host=host) return result @require_context def fixed_ips_by_virtual_interface(context, vif_id): result = model_query(context, models.FixedIp, read_deleted="no").\ filter_by(virtual_interface_id=vif_id).\ all() return result @require_admin_context def fixed_ip_get_network(context, address): fixed_ip_ref = fixed_ip_get_by_address(context, address) return fixed_ip_ref.network @require_context def fixed_ip_update(context, address, values): session = get_session() with session.begin(): fixed_ip_ref = fixed_ip_get_by_address(context, address, session=session) fixed_ip_ref.update(values) fixed_ip_ref.save(session=session) ################### @require_context def virtual_interface_create(context, values): """Create a new virtual interface record in the database. 
:param values: = dict containing column values """ try: vif_ref = models.VirtualInterface() vif_ref.update(values) vif_ref.save() except IntegrityError: raise exception.VirtualInterfaceCreateException() return vif_ref @require_context def _virtual_interface_query(context, session=None): return model_query(context, models.VirtualInterface, session=session, read_deleted="yes") @require_context def virtual_interface_get(context, vif_id, session=None): """Gets a virtual interface from the table. :param vif_id: = id of the virtual interface """ vif_ref = _virtual_interface_query(context, session=session).\ filter_by(id=vif_id).\ first() return vif_ref @require_context def virtual_interface_get_by_address(context, address): """Gets a virtual interface from the table. :param address: = the address of the interface you're looking to get """ vif_ref = _virtual_interface_query(context).\ filter_by(address=address).\ first() return vif_ref @require_context def virtual_interface_get_by_uuid(context, vif_uuid): """Gets a virtual interface from the table. :param vif_uuid: the uuid of the interface you're looking to get """ vif_ref = _virtual_interface_query(context).\ filter_by(uuid=vif_uuid).\ first() return vif_ref @require_context @require_instance_exists def virtual_interface_get_by_instance(context, instance_id): """Gets all virtual interfaces for instance. :param instance_id: = id of the instance to retrieve vifs for """ vif_refs = _virtual_interface_query(context).\ filter_by(instance_id=instance_id).\ all() return vif_refs @require_context def virtual_interface_get_by_instance_and_network(context, instance_id, network_id): """Gets virtual interface for instance that's associated with network.""" vif_ref = _virtual_interface_query(context).\ filter_by(instance_id=instance_id).\ filter_by(network_id=network_id).\ first() return vif_ref @require_context def virtual_interface_delete(context, vif_id): """Delete virtual interface record from the database. :param vif_id: = id of vif to delete """ session = get_session() vif_ref = virtual_interface_get(context, vif_id, session) with session.begin(): session.delete(vif_ref) @require_context def virtual_interface_delete_by_instance(context, instance_id): """Delete virtual interface records that are associated with the instance given by instance_id. :param instance_id: = id of instance """ vif_refs = virtual_interface_get_by_instance(context, instance_id) for vif_ref in vif_refs: virtual_interface_delete(context, vif_ref['id']) @require_context def virtual_interface_get_all(context): """Get all vifs""" vif_refs = _virtual_interface_query(context).all() return vif_refs ################### def _metadata_refs(metadata_dict, meta_class): metadata_refs = [] if metadata_dict: for k, v in metadata_dict.iteritems(): metadata_ref = meta_class() metadata_ref['key'] = k metadata_ref['value'] = v metadata_refs.append(metadata_ref) return metadata_refs @require_context def instance_create(context, values): """Create a new Instance record in the database. context - request context object values - dict containing column values. 
""" values = values.copy() values['metadata'] = _metadata_refs(values.get('metadata'), models.InstanceMetadata) instance_ref = models.Instance() if not values.get('uuid'): values['uuid'] = str(utils.gen_uuid()) instance_ref.update(values) session = get_session() with session.begin(): instance_ref.save(session=session) # and creat the info_cache table entry for instance instance_info_cache_create(context, {'instance_id': instance_ref['uuid']}) return instance_ref @require_admin_context def instance_data_get_for_project(context, project_id): result = model_query(context, func.count(models.Instance.id), func.sum(models.Instance.vcpus), func.sum(models.Instance.memory_mb), read_deleted="no").\ filter_by(project_id=project_id).\ first() # NOTE(vish): convert None to 0 return (result[0] or 0, result[1] or 0, result[2] or 0) @require_context def instance_destroy(context, instance_id): session = get_session() with session.begin(): if utils.is_uuid_like(instance_id): instance_ref = instance_get_by_uuid(context, instance_id, session=session) instance_id = instance_ref['id'] else: instance_ref = instance_get(context, instance_id, session=session) session.query(models.Instance).\ filter_by(id=instance_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupInstanceAssociation).\ filter_by(instance_id=instance_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.InstanceMetadata).\ filter_by(instance_id=instance_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.BlockDeviceMapping).\ filter_by(instance_id=instance_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) instance_info_cache_delete(context, instance_ref['uuid'], session=session) return instance_ref @require_context def instance_get_by_uuid(context, uuid, session=None): result = _build_instance_get(context, session=session).\ filter_by(uuid=uuid).\ first() if not result: raise exception.InstanceNotFound(instance_id=uuid) return result @require_context def instance_get(context, instance_id, session=None): result = _build_instance_get(context, session=session).\ filter_by(id=instance_id).\ first() if not result: raise exception.InstanceNotFound(instance_id=instance_id) return result @require_context def _build_instance_get(context, session=None): return model_query(context, models.Instance, session=session, project_only=True).\ options(joinedload_all('security_groups.rules')).\ options(joinedload('info_cache')).\ options(joinedload('volumes')).\ options(joinedload('metadata')).\ options(joinedload('instance_type')) @require_admin_context def instance_get_all(context): return model_query(context, models.Instance).\ options(joinedload('info_cache')).\ options(joinedload('security_groups')).\ options(joinedload('metadata')).\ options(joinedload('instance_type')).\ all() @require_context def instance_get_all_by_filters(context, filters, sort_key, sort_dir): """Return instances that match all filters. 
Deleted instances will be returned by default, unless there's a filter that says otherwise""" def _regexp_filter_by_metadata(instance, meta): inst_metadata = [{node['key']: node['value']} for node in instance['metadata']] if isinstance(meta, list): for node in meta: if node not in inst_metadata: return False elif isinstance(meta, dict): for k, v in meta.iteritems(): if {k: v} not in inst_metadata: return False return True def _regexp_filter_by_column(instance, filter_name, filter_re): try: v = getattr(instance, filter_name) except AttributeError: return True if v and filter_re.match(str(v)): return True return False sort_fn = {'desc': desc, 'asc': asc} session = get_session() query_prefix = session.query(models.Instance).\ options(joinedload('info_cache')).\ options(joinedload('security_groups')).\ options(joinedload('metadata')).\ options(joinedload('instance_type')).\ order_by(sort_fn[sort_dir](getattr(models.Instance, sort_key))) # Make a copy of the filters dictionary to use going forward, as we'll # be modifying it and we shouldn't affect the caller's use of it. filters = filters.copy() if 'changes-since' in filters: changes_since = utils.normalize_time(filters['changes-since']) query_prefix = query_prefix.\ filter(models.Instance.updated_at > changes_since) if 'deleted' in filters: # Instances can be soft or hard deleted and the query needs to # include or exclude both if filters.pop('deleted'): deleted = or_(models.Instance.deleted == True, models.Instance.vm_state == vm_states.SOFT_DELETE) query_prefix = query_prefix.filter(deleted) else: query_prefix = query_prefix.\ filter_by(deleted=False).\ filter(models.Instance.vm_state != vm_states.SOFT_DELETE) if not context.is_admin: # If we're not admin context, add appropriate filter.. if context.project_id: filters['project_id'] = context.project_id else: filters['user_id'] = context.user_id # Filters for exact matches that we can do along with the SQL query... # For other filters that don't match this, we will do regexp matching exact_match_filter_names = ['project_id', 'user_id', 'image_ref', 'vm_state', 'instance_type_id', 'uuid'] # Filter the query query_prefix = exact_filter(query_prefix, models.Instance, filters, exact_match_filter_names) instances = query_prefix.all() if not instances: return [] # Now filter on everything else for regexp matching.. # For filters not in the list, we'll attempt to use the filter_name # as a column name in Instance.. 
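    # Each remaining filter value is compiled into a regular expression
    # and matched against str() of the instance attribute of the same
    # name. filter() is applied inside the loop, so every lambda runs
    # before its captured loop variables are rebound.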
regexp_filter_funcs = {} for filter_name in filters.iterkeys(): filter_func = regexp_filter_funcs.get(filter_name, None) filter_re = re.compile(str(filters[filter_name])) if filter_func: filter_l = lambda instance: filter_func(instance, filter_re) elif filter_name == 'metadata': filter_l = lambda instance: _regexp_filter_by_metadata(instance, filters[filter_name]) else: filter_l = lambda instance: _regexp_filter_by_column(instance, filter_name, filter_re) instances = filter(filter_l, instances) if not instances: break return instances @require_context def instance_get_active_by_window(context, begin, end=None, project_id=None): """Return instances that were active during window.""" session = get_session() query = session.query(models.Instance) query = query.filter(or_(models.Instance.terminated_at == None, models.Instance.terminated_at > begin)) if end: query = query.filter(models.Instance.launched_at < end) if project_id: query = query.filter_by(project_id=project_id) return query.all() @require_admin_context def instance_get_active_by_window_joined(context, begin, end=None, project_id=None): """Return instances and joins that were active during window.""" session = get_session() query = session.query(models.Instance) query = query.options(joinedload('info_cache')).\ options(joinedload('security_groups')).\ options(joinedload('metadata')).\ options(joinedload('instance_type')).\ filter(or_(models.Instance.terminated_at == None, models.Instance.terminated_at > begin)) if end: query = query.filter(models.Instance.launched_at < end) if project_id: query = query.filter_by(project_id=project_id) return query.all() @require_admin_context def _instance_get_all_query(context, project_only=False): return model_query(context, models.Instance, project_only=project_only).\ options(joinedload('info_cache')).\ options(joinedload('security_groups')).\ options(joinedload('metadata')).\ options(joinedload('instance_type')) @require_admin_context def instance_get_all_by_host(context, host): return _instance_get_all_query(context).filter_by(host=host).all() @require_context def instance_get_all_by_project(context, project_id): authorize_project_context(context, project_id) return _instance_get_all_query(context).\ filter_by(project_id=project_id).\ all() @require_context def instance_get_all_by_reservation(context, reservation_id): return _instance_get_all_query(context, project_only=True).\ filter_by(reservation_id=reservation_id).\ all() # NOTE(jkoelker) This is only being left here for compat with floating # ips. Currently the network_api doesn't return floaters # in network_info. Once it starts return the model. 
This # function and it's call in compute/manager.py on 1829 can # go away @require_context def instance_get_floating_address(context, instance_id): fixed_ips = fixed_ip_get_by_instance(context, instance_id) if not fixed_ips: return None # NOTE(tr3buchet): this only gets the first fixed_ip # won't find floating ips associated with other fixed_ips floating_ips = floating_ip_get_by_fixed_address(context, fixed_ips[0]['address']) if not floating_ips: return None # NOTE(vish): this just returns the first floating ip return floating_ips[0]['address'] @require_admin_context def instance_get_all_hung_in_rebooting(context, reboot_window, session=None): reboot_window = datetime.datetime.utcnow() - datetime.timedelta( seconds=reboot_window) if not session: session = get_session() results = session.query(models.Instance).\ filter(models.Instance.updated_at <= reboot_window).\ filter_by(task_state="rebooting").all() return results @require_context def instance_test_and_set(context, instance_id, attr, ok_states, new_state, session=None): """Atomically check if an instance is in a valid state, and if it is, set the instance into a new state. """ if not session: session = get_session() with session.begin(): query = model_query(context, models.Instance, session=session, project_only=True) if utils.is_uuid_like(instance_id): query = query.filter_by(uuid=instance_id) else: query = query.filter_by(id=instance_id) # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues instance = query.with_lockmode('update').first() state = instance[attr] if state not in ok_states: raise exception.InstanceInvalidState( attr=attr, instance_uuid=instance['uuid'], state=state, method='instance_test_and_set') instance[attr] = new_state instance.save(session=session) @require_context def instance_update(context, instance_id, values): session = get_session() if utils.is_uuid_like(instance_id): instance_ref = instance_get_by_uuid(context, instance_id, session=session) else: instance_ref = instance_get(context, instance_id, session=session) metadata = values.get('metadata') if metadata is not None: instance_metadata_update(context, instance_ref['id'], values.pop('metadata'), delete=True) with session.begin(): instance_ref.update(values) instance_ref.save(session=session) return instance_ref def instance_add_security_group(context, instance_uuid, security_group_id): """Associate the given security group with the given instance""" session = get_session() with session.begin(): instance_ref = instance_get_by_uuid(context, instance_uuid, session=session) security_group_ref = security_group_get(context, security_group_id, session=session) instance_ref.security_groups += [security_group_ref] instance_ref.save(session=session) @require_context def instance_remove_security_group(context, instance_uuid, security_group_id): """Disassociate the given security group from the given instance""" session = get_session() instance_ref = instance_get_by_uuid(context, instance_uuid, session=session) session.query(models.SecurityGroupInstanceAssociation).\ filter_by(instance_id=instance_ref['id']).\ filter_by(security_group_id=security_group_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def instance_action_create(context, values): """Create an instance action from the values dictionary.""" action_ref = models.InstanceActions() action_ref.update(values) session = get_session() with session.begin(): action_ref.save(session=session) return 
action_ref @require_admin_context def instance_get_actions(context, instance_uuid): """Return the actions associated to the given instance id""" session = get_session() return session.query(models.InstanceActions).\ filter_by(instance_uuid=instance_uuid).\ all() @require_context def instance_get_id_to_uuid_mapping(context, ids): session = get_session() instances = session.query(models.Instance).\ filter(models.Instance.id.in_(ids)).\ all() mapping = {} for instance in instances: mapping[instance['id']] = instance['uuid'] return mapping ################### @require_context def instance_info_cache_create(context, values): """Create a new instance cache record in the table. :param context: = request context object :param values: = dict containing column values """ info_cache = models.InstanceInfoCache() info_cache.update(values) session = get_session() with session.begin(): info_cache.save(session=session) return info_cache @require_context def instance_info_cache_get(context, instance_uuid, session=None): """Gets an instance info cache from the table. :param instance_uuid: = uuid of the info cache's instance :param session: = optional session object """ session = session or get_session() info_cache = session.query(models.InstanceInfoCache).\ filter_by(instance_id=instance_uuid).\ first() return info_cache @require_context def instance_info_cache_update(context, instance_uuid, values, session=None): """Update an instance info cache record in the table. :param instance_uuid: = uuid of info cache's instance :param values: = dict containing column values to update :param session: = optional session object """ session = session or get_session() info_cache = instance_info_cache_get(context, instance_uuid, session=session) if info_cache: info_cache.update(values) info_cache.save(session=session) else: # NOTE(tr3buchet): just in case someone blows away an instance's # cache entry values['instance_id'] = instance_uuid info_cache = instance_info_cache_create(context, values) return info_cache @require_context def instance_info_cache_delete(context, instance_uuid, session=None): """Deletes an existing instance_info_cache record :param instance_uuid: = uuid of the instance tied to the cache record :param session: = optional session object """ values = {'deleted': True, 'deleted_at': utils.utcnow()} instance_info_cache_update(context, instance_uuid, values, session) ################### @require_context def key_pair_create(context, values): key_pair_ref = models.KeyPair() key_pair_ref.update(values) key_pair_ref.save() return key_pair_ref @require_context def key_pair_destroy(context, user_id, name): authorize_user_context(context, user_id) session = get_session() with session.begin(): key_pair_ref = key_pair_get(context, user_id, name, session=session) key_pair_ref.delete(session=session) @require_context def key_pair_destroy_all_by_user(context, user_id): authorize_user_context(context, user_id) session = get_session() with session.begin(): session.query(models.KeyPair).\ filter_by(user_id=user_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def key_pair_get(context, user_id, name, session=None): authorize_user_context(context, user_id) result = model_query(context, models.KeyPair, session=session).\ filter_by(user_id=user_id).\ filter_by(name=name).\ first() if not result: raise exception.KeypairNotFound(user_id=user_id, name=name) return result @require_context def key_pair_get_all_by_user(context, user_id): 
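    """Return all non-deleted key pairs owned by the given user."""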
authorize_user_context(context, user_id) return model_query(context, models.KeyPair, read_deleted="no").\ filter_by(user_id=user_id).\ all() ################### @require_admin_context def network_associate(context, project_id, force=False): """Associate a project with a network. called by project_get_networks under certain conditions and network manager add_network_to_project() only associate if the project doesn't already have a network or if force is True force solves race condition where a fresh project has multiple instance builds simultaneously picked up by multiple network hosts which attempt to associate the project with multiple networks force should only be used as a direct consequence of user request all automated requests should not use force """ session = get_session() with session.begin(): def network_query(project_filter): return model_query(context, models.Network, session=session, read_deleted="no").\ filter_by(project_id=project_filter).\ with_lockmode('update').\ first() if not force: # find out if project has a network network_ref = network_query(project_id) if force or not network_ref: # in force mode or project doesn't have a network so associate # with a new network # get new network network_ref = network_query(None) if not network_ref: raise db.NoMoreNetworks() # associate with network # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues network_ref['project_id'] = project_id session.add(network_ref) return network_ref @require_admin_context def network_count(context): return model_query(context, models.Network).count() @require_admin_context def _network_ips_query(context, network_id): return model_query(context, models.FixedIp, read_deleted="no").\ filter_by(network_id=network_id) @require_admin_context def network_count_reserved_ips(context, network_id): return _network_ips_query(context, network_id).\ filter_by(reserved=True).\ count() @require_admin_context def network_create_safe(context, values): if values.get('vlan'): if model_query(context, models.Network, read_deleted="no")\ .filter_by(vlan=values['vlan'])\ .first(): raise exception.DuplicateVlan(vlan=values['vlan']) network_ref = models.Network() network_ref['uuid'] = str(utils.gen_uuid()) network_ref.update(values) try: network_ref.save() return network_ref except IntegrityError: return None @require_admin_context def network_delete_safe(context, network_id): session = get_session() with session.begin(): network_ref = network_get(context, network_id=network_id, session=session) session.delete(network_ref) @require_admin_context def network_disassociate(context, network_id): network_update(context, network_id, {'project_id': None, 'host': None}) @require_context def network_get(context, network_id, session=None): result = model_query(context, models.Network, session=session, project_only=True).\ filter_by(id=network_id).\ first() if not result: raise exception.NetworkNotFound(network_id=network_id) return result @require_admin_context def network_get_all(context): result = model_query(context, models.Network, read_deleted="no").all() if not result: raise exception.NoNetworksFound() return result @require_admin_context def network_get_all_by_uuids(context, network_uuids, project_id=None): project_or_none = or_(models.Network.project_id == project_id, models.Network.project_id == None) result = model_query(context, models.Network, read_deleted="no").\ filter(models.Network.uuid.in_(network_uuids)).\ filter(project_or_none).\ all() if not result: raise 
exception.NoNetworksFound() #check if host is set to all of the networks # returned in the result for network in result: if network['host'] is None: raise exception.NetworkHostNotSet(network_id=network['id']) #check if the result contains all the networks #we are looking for for network_uuid in network_uuids: found = False for network in result: if network['uuid'] == network_uuid: found = True break if not found: if project_id: raise exception.NetworkNotFoundForProject( network_uuid=network_uuid, project_id=context.project_id) raise exception.NetworkNotFound(network_id=network_uuid) return result # NOTE(vish): pylint complains because of the long method name, but # it fits with the names of the rest of the methods # pylint: disable=C0103 @require_admin_context def network_get_associated_fixed_ips(context, network_id, host=None): # FIXME(sirp): since this returns fixed_ips, this would be better named # fixed_ip_get_all_by_network. # NOTE(vish): The ugly joins here are to solve a performance issue and # should be removed once we can add and remove leases # without regenerating the whole list vif_and = and_(models.VirtualInterface.id == models.FixedIp.virtual_interface_id, models.VirtualInterface.deleted == False) inst_and = and_(models.Instance.id == models.FixedIp.instance_id, models.Instance.deleted == False) session = get_session() query = session.query(models.FixedIp.address, models.FixedIp.instance_id, models.FixedIp.network_id, models.FixedIp.virtual_interface_id, models.VirtualInterface.address, models.Instance.hostname, models.Instance.updated_at, models.Instance.created_at).\ filter(models.FixedIp.deleted == False).\ filter(models.FixedIp.network_id == network_id).\ filter(models.FixedIp.allocated == True).\ join((models.VirtualInterface, vif_and)).\ join((models.Instance, inst_and)).\ filter(models.FixedIp.instance_id != None).\ filter(models.FixedIp.virtual_interface_id != None) if host: query = query.filter(models.Instance.host == host) result = query.all() data = [] for datum in result: cleaned = {} cleaned['address'] = datum[0] cleaned['instance_id'] = datum[1] cleaned['network_id'] = datum[2] cleaned['vif_id'] = datum[3] cleaned['vif_address'] = datum[4] cleaned['instance_hostname'] = datum[5] cleaned['instance_updated'] = datum[6] cleaned['instance_created'] = datum[7] data.append(cleaned) return data @require_admin_context def _network_get_query(context, session=None): return model_query(context, models.Network, session=session, read_deleted="no") @require_admin_context def network_get_by_bridge(context, bridge): result = _network_get_query(context).filter_by(bridge=bridge).first() if not result: raise exception.NetworkNotFoundForBridge(bridge=bridge) return result @require_admin_context def network_get_by_uuid(context, uuid): result = _network_get_query(context).filter_by(uuid=uuid).first() if not result: raise exception.NetworkNotFoundForUUID(uuid=uuid) return result @require_admin_context def network_get_by_cidr(context, cidr): result = _network_get_query(context).\ filter(or_(models.Network.cidr == cidr, models.Network.cidr_v6 == cidr)).\ first() if not result: raise exception.NetworkNotFoundForCidr(cidr=cidr) return result @require_admin_context def network_get_by_instance(context, instance_id): # note this uses fixed IP to get to instance # only works for networks the instance has an IP from result = _network_get_query(context).\ filter_by(instance_id=instance_id).\ first() if not result: raise exception.NetworkNotFoundForInstance(instance_id=instance_id) return result 
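# NOTE: a usage sketch, not part of this module, illustrating the three
# read_deleted modes accepted by model_query() above; the function name
# is illustrative and `context` is assumed to carry the usual
# read_deleted default.
def _count_networks_by_deletion(context):
    live = model_query(context, models.Network, read_deleted="no").count()
    gone = model_query(context, models.Network, read_deleted="only").count()
    both = model_query(context, models.Network, read_deleted="yes").count()
    return live, gone, both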
@require_admin_context def network_get_all_by_instance(context, instance_id): result = _network_get_query(context).\ filter_by(instance_id=instance_id).\ all() if not result: raise exception.NetworkNotFoundForInstance(instance_id=instance_id) return result @require_admin_context def network_get_all_by_host(context, host): session = get_session() fixed_ip_query = model_query(context, models.FixedIp.network_id, session=session).\ filter(models.FixedIp.host == host) # NOTE(vish): return networks that have host set # or that have a fixed ip with host set host_filter = or_(models.Network.host == host, models.Network.id.in_(fixed_ip_query.subquery())) return _network_get_query(context, session=session).\ filter(host_filter).\ all() @require_admin_context def network_set_host(context, network_id, host_id): session = get_session() with session.begin(): network_ref = _network_get_query(context, session=session).\ filter_by(id=network_id).\ with_lockmode('update').\ first() if not network_ref: raise exception.NetworkNotFound(network_id=network_id) # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues if not network_ref['host']: network_ref['host'] = host_id session.add(network_ref) return network_ref['host'] @require_context def network_update(context, network_id, values): session = get_session() with session.begin(): network_ref = network_get(context, network_id, session=session) network_ref.update(values) network_ref.save(session=session) return network_ref ################### def queue_get_for(context, topic, physical_node_id): # FIXME(ja): this should be servername? return "%s.%s" % (topic, physical_node_id) ################### @require_admin_context def iscsi_target_count_by_host(context, host): return model_query(context, models.IscsiTarget).\ filter_by(host=host).\ count() @require_admin_context def iscsi_target_create_safe(context, values): iscsi_target_ref = models.IscsiTarget() for (key, value) in values.iteritems(): iscsi_target_ref[key] = value try: iscsi_target_ref.save() return iscsi_target_ref except IntegrityError: return None ################### @require_admin_context def auth_token_destroy(context, token_id): session = get_session() with session.begin(): token_ref = auth_token_get(context, token_id, session=session) token_ref.delete(session=session) @require_admin_context def auth_token_get(context, token_hash, session=None): result = model_query(context, models.AuthToken, session=session).\ filter_by(token_hash=token_hash).\ first() if not result: raise exception.AuthTokenNotFound(token=token_hash) return result @require_admin_context def auth_token_update(context, token_hash, values): session = get_session() with session.begin(): token_ref = auth_token_get(context, token_hash, session=session) token_ref.update(values) token_ref.save(session=session) @require_admin_context def auth_token_create(context, token): tk = models.AuthToken() tk.update(token) tk.save() return tk ################### @require_context def quota_get(context, project_id, resource, session=None): result = model_query(context, models.Quota, session=session, read_deleted="no").\ filter_by(project_id=project_id).\ filter_by(resource=resource).\ first() if not result: raise exception.ProjectQuotaNotFound(project_id=project_id) return result @require_context def quota_get_all_by_project(context, project_id): authorize_project_context(context, project_id) rows = model_query(context, models.Quota, read_deleted="no").\ filter_by(project_id=project_id).\ all() result = 
{'project_id': project_id} for row in rows: result[row.resource] = row.hard_limit return result @require_admin_context def quota_create(context, project_id, resource, limit): # NOTE: Treat -1 as unlimited for consistency w/ flags if limit == -1: limit = None quota_ref = models.Quota() quota_ref.project_id = project_id quota_ref.resource = resource quota_ref.hard_limit = limit quota_ref.save() return quota_ref @require_admin_context def quota_update(context, project_id, resource, limit): # NOTE: Treat -1 as unlimited for consistency w/ flags if limit == -1: limit = None session = get_session() with session.begin(): quota_ref = quota_get(context, project_id, resource, session=session) quota_ref.hard_limit = limit quota_ref.save(session=session) @require_admin_context def quota_destroy(context, project_id, resource): session = get_session() with session.begin(): quota_ref = quota_get(context, project_id, resource, session=session) quota_ref.delete(session=session) @require_admin_context def quota_destroy_all_by_project(context, project_id): session = get_session() with session.begin(): quotas = model_query(context, models.Quota, session=session, read_deleted="no").\ filter_by(project_id=project_id).\ all() for quota_ref in quotas: quota_ref.delete(session=session) ################### @require_admin_context def volume_allocate_iscsi_target(context, volume_id, host): session = get_session() with session.begin(): iscsi_target_ref = model_query(context, models.IscsiTarget, session=session, read_deleted="no").\ filter_by(volume=None).\ filter_by(host=host).\ with_lockmode('update').\ first() # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues if not iscsi_target_ref: raise db.NoMoreTargets() iscsi_target_ref.volume_id = volume_id session.add(iscsi_target_ref) return iscsi_target_ref.target_num @require_admin_context def volume_attached(context, volume_id, instance_id, mountpoint): session = get_session() with session.begin(): volume_ref = volume_get(context, volume_id, session=session) volume_ref['status'] = 'in-use' volume_ref['mountpoint'] = mountpoint volume_ref['attach_status'] = 'attached' volume_ref.instance = instance_get(context, instance_id, session=session) volume_ref.save(session=session) @require_context def volume_create(context, values): values['volume_metadata'] = _metadata_refs(values.get('metadata'), models.VolumeMetadata) volume_ref = models.Volume() volume_ref.update(values) session = get_session() with session.begin(): volume_ref.save(session=session) return volume_ref @require_admin_context def volume_data_get_for_project(context, project_id): result = model_query(context, func.count(models.Volume.id), func.sum(models.Volume.size), read_deleted="no").\ filter_by(project_id=project_id).\ first() # NOTE(vish): convert None to 0 return (result[0] or 0, result[1] or 0) @require_admin_context def volume_destroy(context, volume_id): session = get_session() with session.begin(): session.query(models.Volume).\ filter_by(id=volume_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.IscsiTarget).\ filter_by(volume_id=volume_id).\ update({'volume_id': None}) session.query(models.VolumeMetadata).\ filter_by(volume_id=volume_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_admin_context def volume_detached(context, volume_id): session = get_session() with session.begin(): volume_ref = volume_get(context, 
volume_id, session=session) volume_ref['status'] = 'available' volume_ref['mountpoint'] = None volume_ref['attach_status'] = 'detached' volume_ref.instance = None volume_ref.save(session=session) @require_context def _volume_get_query(context, session=None, project_only=False): return model_query(context, models.Volume, session=session, project_only=project_only).\ options(joinedload('instance')).\ options(joinedload('volume_metadata')).\ options(joinedload('volume_type')) @require_context def volume_get(context, volume_id, session=None): result = _volume_get_query(context, session=session, project_only=True).\ filter_by(id=volume_id).\ first() if not result: raise exception.VolumeNotFound(volume_id=volume_id) return result @require_admin_context def volume_get_all(context): return _volume_get_query(context).all() @require_admin_context def volume_get_all_by_host(context, host): return _volume_get_query(context).filter_by(host=host).all() @require_admin_context def volume_get_all_by_instance(context, instance_id): result = model_query(context, models.Volume, read_deleted="no").\ options(joinedload('volume_metadata')).\ options(joinedload('volume_type')).\ filter_by(instance_id=instance_id).\ all() if not result: raise exception.VolumeNotFoundForInstance(instance_id=instance_id) return result @require_context def volume_get_all_by_project(context, project_id): authorize_project_context(context, project_id) return _volume_get_query(context).filter_by(project_id=project_id).all() @require_admin_context def volume_get_instance(context, volume_id): result = _volume_get_query(context).filter_by(id=volume_id).first() if not result: raise exception.VolumeNotFound(volume_id=volume_id) return result.instance @require_admin_context def volume_get_iscsi_target_num(context, volume_id): result = model_query(context, models.IscsiTarget, read_deleted="yes").\ filter_by(volume_id=volume_id).\ first() if not result: raise exception.ISCSITargetNotFoundForVolume(volume_id=volume_id) return result.target_num @require_context def volume_update(context, volume_id, values): session = get_session() metadata = values.get('metadata') if metadata is not None: volume_metadata_update(context, volume_id, values.pop('metadata'), delete=True) with session.begin(): volume_ref = volume_get(context, volume_id, session=session) volume_ref.update(values) volume_ref.save(session=session) #################### def _volume_metadata_get_query(context, volume_id, session=None): return model_query(context, models.VolumeMetadata, session=session, read_deleted="no").\ filter_by(volume_id=volume_id) @require_context @require_volume_exists def volume_metadata_get(context, volume_id): rows = _volume_metadata_get_query(context, volume_id).all() result = {} for row in rows: result[row['key']] = row['value'] return result @require_context @require_volume_exists def volume_metadata_delete(context, volume_id, key): _volume_metadata_get_query(context, volume_id).\ filter_by(key=key).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context @require_volume_exists def volume_metadata_get_item(context, volume_id, key, session=None): result = _volume_metadata_get_query(context, volume_id, session=session).\ filter_by(key=key).\ first() if not result: raise exception.VolumeMetadataNotFound(metadata_key=key, volume_id=volume_id) return result @require_context @require_volume_exists def volume_metadata_update(context, volume_id, metadata, delete): session = get_session() # Set existing 
metadata to deleted if delete argument is True if delete: original_metadata = volume_metadata_get(context, volume_id) for meta_key, meta_value in original_metadata.iteritems(): if meta_key not in metadata: meta_ref = volume_metadata_get_item(context, volume_id, meta_key, session) meta_ref.update({'deleted': True}) meta_ref.save(session=session) meta_ref = None # Now update all existing items with new values, or create new meta objects for meta_key, meta_value in metadata.iteritems(): # update the value whether it exists or not item = {"value": meta_value} try: meta_ref = volume_metadata_get_item(context, volume_id, meta_key, session) except exception.VolumeMetadataNotFound, e: meta_ref = models.VolumeMetadata() item.update({"key": meta_key, "volume_id": volume_id}) meta_ref.update(item) meta_ref.save(session=session) return metadata ################### @require_context def snapshot_create(context, values): snapshot_ref = models.Snapshot() snapshot_ref.update(values) session = get_session() with session.begin(): snapshot_ref.save(session=session) return snapshot_ref @require_admin_context def snapshot_destroy(context, snapshot_id): session = get_session() with session.begin(): session.query(models.Snapshot).\ filter_by(id=snapshot_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def snapshot_get(context, snapshot_id, session=None): result = model_query(context, models.Snapshot, session=session, project_only=True).\ filter_by(id=snapshot_id).\ first() if not result: raise exception.SnapshotNotFound(snapshot_id=snapshot_id) return result @require_admin_context def snapshot_get_all(context): return model_query(context, models.Snapshot).all() @require_context def snapshot_get_all_for_volume(context, volume_id): return model_query(context, models.Snapshot, read_deleted='no', project_only=True).\ filter_by(volume_id=volume_id).all() @require_context def snapshot_get_all_by_project(context, project_id): authorize_project_context(context, project_id) return model_query(context, models.Snapshot).\ filter_by(project_id=project_id).\ all() @require_context def snapshot_update(context, snapshot_id, values): session = get_session() with session.begin(): snapshot_ref = snapshot_get(context, snapshot_id, session=session) snapshot_ref.update(values) snapshot_ref.save(session=session) ################### def _block_device_mapping_get_query(context, session=None): return model_query(context, models.BlockDeviceMapping, session=session, read_deleted="no") @require_context def block_device_mapping_create(context, values): bdm_ref = models.BlockDeviceMapping() bdm_ref.update(values) session = get_session() with session.begin(): bdm_ref.save(session=session) @require_context def block_device_mapping_update(context, bdm_id, values): session = get_session() with session.begin(): _block_device_mapping_get_query(context, session=session).\ filter_by(id=bdm_id).\ update(values) @require_context def block_device_mapping_update_or_create(context, values): session = get_session() with session.begin(): result = _block_device_mapping_get_query(context, session=session).\ filter_by(instance_id=values['instance_id']).\ filter_by(device_name=values['device_name']).\ first() if not result: bdm_ref = models.BlockDeviceMapping() bdm_ref.update(values) bdm_ref.save(session=session) else: result.update(values) # NOTE(yamahata): same virtual device name can be specified multiple # times. So delete the existing ones. 
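        # Illustration (values hypothetical): if 'ephemeral0' was first
        # mapped to '/dev/sdb' and this call maps it to '/dev/sdc', the
        # query below soft-deletes the stale '/dev/sdb' row so that only
        # one live mapping remains for the virtual device.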
virtual_name = values['virtual_name'] if (virtual_name is not None and block_device.is_swap_or_ephemeral(virtual_name)): session.query(models.BlockDeviceMapping).\ filter_by(instance_id=values['instance_id']).\ filter_by(virtual_name=virtual_name).\ filter(models.BlockDeviceMapping.device_name != values['device_name']).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def block_device_mapping_get_all_by_instance(context, instance_id): return _block_device_mapping_get_query(context).\ filter_by(instance_id=instance_id).\ all() @require_context def block_device_mapping_destroy(context, bdm_id): session = get_session() with session.begin(): session.query(models.BlockDeviceMapping).\ filter_by(id=bdm_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def block_device_mapping_destroy_by_instance_and_volume(context, instance_id, volume_id): session = get_session() with session.begin(): _block_device_mapping_get_query(context, session=session).\ filter_by(instance_id=instance_id).\ filter_by(volume_id=volume_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) ################### def _security_group_get_query(context, session=None, read_deleted=None, project_only=False): return model_query(context, models.SecurityGroup, session=session, read_deleted=read_deleted, project_only=project_only).\ options(joinedload_all('rules')) @require_context def security_group_get_all(context): return _security_group_get_query(context).all() @require_context def security_group_get(context, security_group_id, session=None): result = _security_group_get_query(context, session=session, project_only=True).\ filter_by(id=security_group_id).\ options(joinedload_all('instances')).\ first() if not result: raise exception.SecurityGroupNotFound( security_group_id=security_group_id) return result @require_context def security_group_get_by_name(context, project_id, group_name): result = _security_group_get_query(context, read_deleted="no").\ filter_by(project_id=project_id).\ filter_by(name=group_name).\ options(joinedload_all('instances')).\ first() if not result: raise exception.SecurityGroupNotFoundForProject( project_id=project_id, security_group_id=group_name) return result @require_context def security_group_get_by_project(context, project_id): return _security_group_get_query(context, read_deleted="no").\ filter_by(project_id=project_id).\ all() @require_context def security_group_get_by_instance(context, instance_id): return _security_group_get_query(context, read_deleted="no").\ join(models.SecurityGroup.instances).\ filter_by(id=instance_id).\ all() @require_context def security_group_exists(context, project_id, group_name): try: group = security_group_get_by_name(context, project_id, group_name) return group is not None except exception.NotFound: return False @require_context def security_group_in_use(context, group_id): session = get_session() with session.begin(): # Are there any instances that haven't been deleted # that include this group? 
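        # Sketch of the check that follows: first collect the undeleted
        # association rows for this group, then return True as soon as one
        # of them points at an instance row that is itself not deleted.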
inst_assoc = session.query(models.SecurityGroupInstanceAssociation).\ filter_by(security_group_id=group_id).\ filter_by(deleted=False).\ all() for ia in inst_assoc: num_instances = session.query(models.Instance).\ filter_by(deleted=False).\ filter_by(id=ia.instance_id).\ count() if num_instances: return True return False @require_context def security_group_create(context, values): security_group_ref = models.SecurityGroup() # FIXME(devcamcar): Unless I do this, rules fails with lazy load exception # once save() is called. This will get cleaned up in next orm pass. security_group_ref.rules security_group_ref.update(values) security_group_ref.save() return security_group_ref @require_context def security_group_destroy(context, security_group_id): session = get_session() with session.begin(): session.query(models.SecurityGroup).\ filter_by(id=security_group_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupInstanceAssociation).\ filter_by(security_group_id=security_group_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupIngressRule).\ filter_by(group_id=security_group_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) ################### def _security_group_rule_get_query(context, session=None): return model_query(context, models.SecurityGroupIngressRule, session=session) @require_context def security_group_rule_get(context, security_group_rule_id, session=None): result = _security_group_rule_get_query(context, session=session).\ filter_by(id=security_group_rule_id).\ first() if not result: raise exception.SecurityGroupNotFoundForRule( rule_id=security_group_rule_id) return result @require_context def security_group_rule_get_by_security_group(context, security_group_id, session=None): return _security_group_rule_get_query(context, session=session).\ filter_by(parent_group_id=security_group_id).\ options(joinedload_all('grantee_group.instances')).\ all() @require_context def security_group_rule_get_by_security_group_grantee(context, security_group_id, session=None): return _security_group_rule_get_query(context, session=session).\ filter_by(group_id=security_group_id).\ all() @require_context def security_group_rule_create(context, values): security_group_rule_ref = models.SecurityGroupIngressRule() security_group_rule_ref.update(values) security_group_rule_ref.save() return security_group_rule_ref @require_context def security_group_rule_destroy(context, security_group_rule_id): session = get_session() with session.begin(): security_group_rule = security_group_rule_get(context, security_group_rule_id, session=session) security_group_rule.delete(session=session) ################### @require_admin_context def provider_fw_rule_create(context, rule): fw_rule_ref = models.ProviderFirewallRule() fw_rule_ref.update(rule) fw_rule_ref.save() return fw_rule_ref @require_admin_context def provider_fw_rule_get_all(context): return model_query(context, models.ProviderFirewallRule).all() @require_admin_context def provider_fw_rule_destroy(context, rule_id): session = get_session() with session.begin(): session.query(models.ProviderFirewallRule).\ filter_by(id=rule_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) ################### @require_admin_context def user_get(context, id, session=None): result = 
model_query(context, models.User, session=session).\ filter_by(id=id).\ first() if not result: raise exception.UserNotFound(user_id=id) return result @require_admin_context def user_get_by_access_key(context, access_key, session=None): result = model_query(context, models.User, session=session).\ filter_by(access_key=access_key).\ first() if not result: raise exception.AccessKeyNotFound(access_key=access_key) return result @require_admin_context def user_create(context, values): user_ref = models.User() user_ref.update(values) user_ref.save() return user_ref @require_admin_context def user_delete(context, id): session = get_session() with session.begin(): session.query(models.UserProjectAssociation).\ filter_by(user_id=id).\ delete() session.query(models.UserRoleAssociation).\ filter_by(user_id=id).\ delete() session.query(models.UserProjectRoleAssociation).\ filter_by(user_id=id).\ delete() user_ref = user_get(context, id, session=session) session.delete(user_ref) def user_get_all(context): return model_query(context, models.User).all() def user_get_roles(context, user_id): session = get_session() with session.begin(): user_ref = user_get(context, user_id, session=session) return [role.role for role in user_ref['roles']] def user_get_roles_for_project(context, user_id, project_id): session = get_session() with session.begin(): res = session.query(models.UserProjectRoleAssociation).\ filter_by(user_id=user_id).\ filter_by(project_id=project_id).\ all() return [association.role for association in res] def user_remove_project_role(context, user_id, project_id, role): session = get_session() with session.begin(): session.query(models.UserProjectRoleAssociation).\ filter_by(user_id=user_id).\ filter_by(project_id=project_id).\ filter_by(role=role).\ delete() def user_remove_role(context, user_id, role): session = get_session() with session.begin(): res = session.query(models.UserRoleAssociation).\ filter_by(user_id=user_id).\ filter_by(role=role).\ all() for role in res: session.delete(role) def user_add_role(context, user_id, role): session = get_session() with session.begin(): user_ref = user_get(context, user_id, session=session) models.UserRoleAssociation(user=user_ref, role=role).\ save(session=session) def user_add_project_role(context, user_id, project_id, role): session = get_session() with session.begin(): user_ref = user_get(context, user_id, session=session) project_ref = project_get(context, project_id, session=session) models.UserProjectRoleAssociation(user_id=user_ref['id'], project_id=project_ref['id'], role=role).save(session=session) def user_update(context, user_id, values): session = get_session() with session.begin(): user_ref = user_get(context, user_id, session=session) user_ref.update(values) user_ref.save(session=session) ################### def project_create(context, values): project_ref = models.Project() project_ref.update(values) project_ref.save() return project_ref def project_add_member(context, project_id, user_id): session = get_session() with session.begin(): project_ref = project_get(context, project_id, session=session) user_ref = user_get(context, user_id, session=session) project_ref.members += [user_ref] project_ref.save(session=session) def project_get(context, id, session=None): result = model_query(context, models.Project, session=session, read_deleted="no").\ filter_by(id=id).\ options(joinedload_all('members')).\ first() if not result: raise exception.ProjectNotFound(project_id=id) return result def project_get_all(context): return 
model_query(context, models.Project).\ options(joinedload_all('members')).\ all() def project_get_by_user(context, user_id): user = model_query(context, models.User).\ filter_by(id=user_id).\ options(joinedload_all('projects')).\ first() if not user: raise exception.UserNotFound(user_id=user_id) return user.projects def project_remove_member(context, project_id, user_id): session = get_session() project = project_get(context, project_id, session=session) user = user_get(context, user_id, session=session) if user in project.members: project.members.remove(user) project.save(session=session) def project_update(context, project_id, values): session = get_session() with session.begin(): project_ref = project_get(context, project_id, session=session) project_ref.update(values) project_ref.save(session=session) def project_delete(context, id): session = get_session() with session.begin(): session.query(models.UserProjectAssociation).\ filter_by(project_id=id).\ delete() session.query(models.UserProjectRoleAssociation).\ filter_by(project_id=id).\ delete() project_ref = project_get(context, id, session=session) session.delete(project_ref) @require_context def project_get_networks(context, project_id, associate=True): # NOTE(tr3buchet): as before this function will associate # a project with a network if it doesn't have one and # associate is true result = model_query(context, models.Network, read_deleted="no").\ filter_by(project_id=project_id).\ all() if not result: if not associate: return [] return [network_associate(context, project_id)] return result ################### @require_admin_context def migration_create(context, values): migration = models.Migration() migration.update(values) migration.save() return migration @require_admin_context def migration_update(context, id, values): session = get_session() with session.begin(): migration = migration_get(context, id, session=session) migration.update(values) migration.save(session=session) return migration @require_admin_context def migration_get(context, id, session=None): result = model_query(context, models.Migration, session=session, read_deleted="yes").\ filter_by(id=id).\ first() if not result: raise exception.MigrationNotFound(migration_id=id) return result @require_admin_context def migration_get_by_instance_and_status(context, instance_uuid, status): result = model_query(context, models.Migration, read_deleted="yes").\ filter_by(instance_uuid=instance_uuid).\ filter_by(status=status).\ first() if not result: raise exception.MigrationNotFoundByStatus(instance_id=instance_uuid, status=status) return result @require_admin_context def migration_get_all_unconfirmed(context, confirm_window, session=None): confirm_window = datetime.datetime.utcnow() - datetime.timedelta( seconds=confirm_window) return model_query(context, models.Migration, session=session, read_deleted="yes").\ filter(models.Migration.updated_at <= confirm_window).\ filter_by(status="FINISHED").\ all() ################## def console_pool_create(context, values): pool = models.ConsolePool() pool.update(values) pool.save() return pool def console_pool_get(context, pool_id): result = model_query(context, models.ConsolePool, read_deleted="no").\ filter_by(id=pool_id).\ first() if not result: raise exception.ConsolePoolNotFound(pool_id=pool_id) return result def console_pool_get_by_host_type(context, compute_host, host, console_type): result = model_query(context, models.ConsolePool, read_deleted="no").\ filter_by(host=host).\ filter_by(console_type=console_type).\ 
filter_by(compute_host=compute_host).\ options(joinedload('consoles')).\ first() if not result: raise exception.ConsolePoolNotFoundForHostType( host=host, console_type=console_type, compute_host=compute_host) return result def console_pool_get_all_by_host_type(context, host, console_type): return model_query(context, models.ConsolePool, read_deleted="no").\ filter_by(host=host).\ filter_by(console_type=console_type).\ options(joinedload('consoles')).\ all() def console_create(context, values): console = models.Console() console.update(values) console.save() return console def console_delete(context, console_id): session = get_session() with session.begin(): # NOTE(mdragon): consoles are meant to be transient. session.query(models.Console).\ filter_by(id=console_id).\ delete() def console_get_by_pool_instance(context, pool_id, instance_id): result = model_query(context, models.Console, read_deleted="yes").\ filter_by(pool_id=pool_id).\ filter_by(instance_id=instance_id).\ options(joinedload('pool')).\ first() if not result: raise exception.ConsoleNotFoundInPoolForInstance( pool_id=pool_id, instance_id=instance_id) return result def console_get_all_by_instance(context, instance_id): return model_query(context, models.Console, read_deleted="yes").\ filter_by(instance_id=instance_id).\ all() def console_get(context, console_id, instance_id=None): query = model_query(context, models.Console, read_deleted="yes").\ filter_by(id=console_id).\ options(joinedload('pool')) if instance_id is not None: query = query.filter_by(instance_id=instance_id) result = query.first() if not result: if instance_id: raise exception.ConsoleNotFoundForInstance( console_id=console_id, instance_id=instance_id) else: raise exception.ConsoleNotFound(console_id=console_id) return result ################## @require_admin_context def instance_type_create(context, values): """Create a new instance type. In order to pass in extra specs, the values dict should contain a 'extra_specs' key/value pair: {'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}} """ session = get_session() with session.begin(): try: instance_type_get_by_name(context, values['name'], session) raise exception.InstanceTypeExists(name=values['name']) except exception.InstanceTypeNotFoundByName: pass try: instance_type_get_by_flavor_id(context, values['flavorid'], session) raise exception.InstanceTypeExists(name=values['name']) except exception.FlavorNotFound: pass try: specs = values.get('extra_specs') specs_refs = [] if specs: for k, v in specs.iteritems(): specs_ref = models.InstanceTypeExtraSpecs() specs_ref['key'] = k specs_ref['value'] = v specs_refs.append(specs_ref) values['extra_specs'] = specs_refs instance_type_ref = models.InstanceTypes() instance_type_ref.update(values) instance_type_ref.save(session=session) except Exception, e: raise exception.DBError(e) return _dict_with_extra_specs(instance_type_ref) def _dict_with_extra_specs(inst_type_query): """Takes an instance, volume, or instance type query returned by sqlalchemy and returns it as a dictionary, converting the extra_specs entry from a list of dicts: 'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...] 
to a single dict: 'extra_specs' : {'k1': 'v1'} """ inst_type_dict = dict(inst_type_query) extra_specs = dict([(x['key'], x['value']) for x in inst_type_query['extra_specs']]) inst_type_dict['extra_specs'] = extra_specs return inst_type_dict def _instance_type_get_query(context, session=None, read_deleted=None): return model_query(context, models.InstanceTypes, session=session, read_deleted=read_deleted).\ options(joinedload('extra_specs')) @require_context def instance_type_get_all(context, inactive=False, filters=None): """ Returns all instance types. """ filters = filters or {} read_deleted = "yes" if inactive else "no" query = _instance_type_get_query(context, read_deleted=read_deleted) if 'min_memory_mb' in filters: query = query.filter( models.InstanceTypes.memory_mb >= filters['min_memory_mb']) if 'min_root_gb' in filters: query = query.filter( models.InstanceTypes.root_gb >= filters['min_root_gb']) inst_types = query.order_by("name").all() return [_dict_with_extra_specs(i) for i in inst_types] @require_context def instance_type_get(context, id, session=None): """Returns a dict describing specific instance_type""" result = _instance_type_get_query(context, session=session).\ filter_by(id=id).\ first() if not result: raise exception.InstanceTypeNotFound(instance_type_id=id) return _dict_with_extra_specs(result) @require_context def instance_type_get_by_name(context, name, session=None): """Returns a dict describing specific instance_type""" result = _instance_type_get_query(context, session=session).\ filter_by(name=name).\ first() if not result: raise exception.InstanceTypeNotFoundByName(instance_type_name=name) return _dict_with_extra_specs(result) @require_context def instance_type_get_by_flavor_id(context, flavor_id, session=None): """Returns a dict describing specific flavor_id""" result = _instance_type_get_query(context, session=session).\ filter_by(flavorid=flavor_id).\ first() if not result: raise exception.FlavorNotFound(flavor_id=flavor_id) return _dict_with_extra_specs(result) @require_admin_context def instance_type_destroy(context, name): """Marks specific instance_type as deleted""" session = get_session() with session.begin(): instance_type_ref = instance_type_get_by_name(context, name, session=session) instance_type_id = instance_type_ref['id'] session.query(models.InstanceTypes).\ filter_by(id=instance_type_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.InstanceTypeExtraSpecs).\ filter_by(instance_type_id=instance_type_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) #################### @require_admin_context def cell_create(context, values): cell = models.Cell() cell.update(values) cell.save() return cell def _cell_get_by_id_query(context, cell_id, session=None): return model_query(context, models.Cell, session=session).\ filter_by(id=cell_id) @require_admin_context def cell_update(context, cell_id, values): cell = cell_get(context, cell_id) cell.update(values) cell.save() return cell @require_admin_context def cell_delete(context, cell_id): session = get_session() with session.begin(): _cell_get_by_id_query(context, cell_id, session=session).\ delete() @require_admin_context def cell_get(context, cell_id): result = _cell_get_by_id_query(context, cell_id).first() if not result: raise exception.CellNotFound(cell_id=cell_id) return result @require_admin_context def cell_get_all(context): return model_query(context, models.Cell, 
read_deleted="no").all() #################### def _instance_metadata_get_query(context, instance_id, session=None): return model_query(context, models.InstanceMetadata, session=session, read_deleted="no").\ filter_by(instance_id=instance_id) @require_context @require_instance_exists def instance_metadata_get(context, instance_id): rows = _instance_metadata_get_query(context, instance_id).all() result = {} for row in rows: result[row['key']] = row['value'] return result @require_context @require_instance_exists def instance_metadata_delete(context, instance_id, key): _instance_metadata_get_query(context, instance_id).\ filter_by(key=key).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context @require_instance_exists def instance_metadata_get_item(context, instance_id, key, session=None): result = _instance_metadata_get_query( context, instance_id, session=session).\ filter_by(key=key).\ first() if not result: raise exception.InstanceMetadataNotFound(metadata_key=key, instance_id=instance_id) return result @require_context @require_instance_exists def instance_metadata_update(context, instance_id, metadata, delete): session = get_session() # Set existing metadata to deleted if delete argument is True if delete: original_metadata = instance_metadata_get(context, instance_id) for meta_key, meta_value in original_metadata.iteritems(): if meta_key not in metadata: meta_ref = instance_metadata_get_item(context, instance_id, meta_key, session) meta_ref.update({'deleted': True}) meta_ref.save(session=session) meta_ref = None # Now update all existing items with new values, or create new meta objects for meta_key, meta_value in metadata.iteritems(): # update the value whether it exists or not item = {"value": meta_value} try: meta_ref = instance_metadata_get_item(context, instance_id, meta_key, session) except exception.InstanceMetadataNotFound, e: meta_ref = models.InstanceMetadata() item.update({"key": meta_key, "instance_id": instance_id}) meta_ref.update(item) meta_ref.save(session=session) return metadata #################### @require_admin_context def agent_build_create(context, values): agent_build_ref = models.AgentBuild() agent_build_ref.update(values) agent_build_ref.save() return agent_build_ref @require_admin_context def agent_build_get_by_triple(context, hypervisor, os, architecture, session=None): return model_query(context, models.AgentBuild, session=session, read_deleted="no").\ filter_by(hypervisor=hypervisor).\ filter_by(os=os).\ filter_by(architecture=architecture).\ first() @require_admin_context def agent_build_get_all(context): return model_query(context, models.AgentBuild, read_deleted="no").\ all() @require_admin_context def agent_build_destroy(context, agent_build_id): session = get_session() with session.begin(): model_query(context, models.AgentBuild, session=session, read_deleted="yes").\ filter_by(id=agent_build_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_admin_context def agent_build_update(context, agent_build_id, values): session = get_session() with session.begin(): agent_build_ref = model_query(context, models.AgentBuild, session=session, read_deleted="yes").\ filter_by(id=agent_build_id).\ first() agent_build_ref.update(values) agent_build_ref.save(session=session) #################### @require_context def bw_usage_get_by_macs(context, macs, start_period): return model_query(context, models.BandwidthUsage, read_deleted="yes").\ 
                   filter(models.BandwidthUsage.mac.in_(macs)).\
                   filter_by(start_period=start_period).\
                   all()


@require_context
def bw_usage_update(context, mac, start_period, bw_in, bw_out, session=None):
    if not session:
        session = get_session()

    with session.begin():
        bwusage = model_query(context, models.BandwidthUsage,
                              session=session, read_deleted="yes").\
                          filter_by(start_period=start_period).\
                          filter_by(mac=mac).\
                          first()

        if not bwusage:
            bwusage = models.BandwidthUsage()
            bwusage.start_period = start_period
            bwusage.mac = mac

        bwusage.last_refreshed = utils.utcnow()
        bwusage.bw_in = bw_in
        bwusage.bw_out = bw_out
        bwusage.save(session=session)


####################


def _instance_type_extra_specs_get_query(context, instance_type_id,
                                         session=None):
    return model_query(context, models.InstanceTypeExtraSpecs,
                       session=session, read_deleted="no").\
                    filter_by(instance_type_id=instance_type_id)


@require_context
def instance_type_extra_specs_get(context, instance_type_id):
    rows = _instance_type_extra_specs_get_query(
                            context, instance_type_id).\
                    all()

    result = {}
    for row in rows:
        result[row['key']] = row['value']

    return result


@require_context
def instance_type_extra_specs_delete(context, instance_type_id, key):
    _instance_type_extra_specs_get_query(
                            context, instance_type_id).\
        filter_by(key=key).\
        update({'deleted': True,
                'deleted_at': utils.utcnow(),
                'updated_at': literal_column('updated_at')})


@require_context
def instance_type_extra_specs_get_item(context, instance_type_id, key,
                                       session=None):
    result = _instance_type_extra_specs_get_query(
                            context, instance_type_id, session=session).\
                    filter_by(key=key).\
                    first()

    if not result:
        raise exception.InstanceTypeExtraSpecsNotFound(
                extra_specs_key=key, instance_type_id=instance_type_id)

    return result


@require_context
def instance_type_extra_specs_update_or_create(context, instance_type_id,
                                               specs):
    session = get_session()
    spec_ref = None
    for key, value in specs.iteritems():
        try:
            spec_ref = instance_type_extra_specs_get_item(
                context, instance_type_id, key, session)
        except exception.InstanceTypeExtraSpecsNotFound:
            spec_ref = models.InstanceTypeExtraSpecs()
        spec_ref.update({"key": key, "value": value,
                         "instance_type_id": instance_type_id,
                         "deleted": 0})
        spec_ref.save(session=session)
    return specs


##################


@require_admin_context
def volume_type_create(context, values):
    """Create a new volume type. In order to pass in extra specs,
    the values dict should contain an 'extra_specs' key/value pair:

    {'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}}

    """
    session = get_session()
    with session.begin():
        try:
            volume_type_get_by_name(context, values['name'], session)
            raise exception.VolumeTypeExists(name=values['name'])
        except exception.VolumeTypeNotFoundByName:
            pass
        try:
            values['extra_specs'] = _metadata_refs(values.get('extra_specs'),
                                                   models.VolumeTypeExtraSpecs)
            volume_type_ref = models.VolumeTypes()
            volume_type_ref.update(values)
            volume_type_ref.save()
        except Exception, e:
            raise exception.DBError(e)
        return volume_type_ref


@require_context
def volume_type_get_all(context, inactive=False, filters=None):
    """
    Returns a dict describing all volume_types with name as key.
""" filters = filters or {} read_deleted = "yes" if inactive else "no" rows = model_query(context, models.VolumeTypes, read_deleted=read_deleted).\ options(joinedload('extra_specs')).\ order_by("name").\ all() # TODO(sirp): this patern of converting rows to a result with extra_specs # is repeated quite a bit, might be worth creating a method for it result = {} for row in rows: result[row['name']] = _dict_with_extra_specs(row) return result @require_context def volume_type_get(context, id, session=None): """Returns a dict describing specific volume_type""" result = model_query(context, models.VolumeTypes, session=session).\ options(joinedload('extra_specs')).\ filter_by(id=id).\ first() if not result: raise exception.VolumeTypeNotFound(volume_type=id) return _dict_with_extra_specs(result) @require_context def volume_type_get_by_name(context, name, session=None): """Returns a dict describing specific volume_type""" result = model_query(context, models.VolumeTypes, session=session).\ options(joinedload('extra_specs')).\ filter_by(name=name).\ first() if not result: raise exception.VolumeTypeNotFoundByName(volume_type_name=name) else: return _dict_with_extra_specs(result) @require_admin_context def volume_type_destroy(context, name): session = get_session() with session.begin(): volume_type_ref = volume_type_get_by_name(context, name, session=session) volume_type_id = volume_type_ref['id'] session.query(models.VolumeTypes).\ filter_by(id=volume_type_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.VolumeTypeExtraSpecs).\ filter_by(volume_type_id=volume_type_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) #################### def _volume_type_extra_specs_query(context, volume_type_id, session=None): return model_query(context, models.VolumeTypeExtraSpecs, session=session, read_deleted="no").\ filter_by(volume_type_id=volume_type_id) @require_context def volume_type_extra_specs_get(context, volume_type_id): rows = _volume_type_extra_specs_query(context, volume_type_id).\ all() result = {} for row in rows: result[row['key']] = row['value'] return result @require_context def volume_type_extra_specs_delete(context, volume_type_id, key): _volume_type_extra_specs_query(context, volume_type_id).\ filter_by(key=key).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def volume_type_extra_specs_get_item(context, volume_type_id, key, session=None): result = _volume_type_extra_specs_query( context, volume_type_id, session=session).\ filter_by(key=key).\ first() if not result: raise exception.VolumeTypeExtraSpecsNotFound( extra_specs_key=key, volume_type_id=volume_type_id) return result @require_context def volume_type_extra_specs_update_or_create(context, volume_type_id, specs): session = get_session() spec_ref = None for key, value in specs.iteritems(): try: spec_ref = volume_type_extra_specs_get_item( context, volume_type_id, key, session) except exception.VolumeTypeExtraSpecsNotFound, e: spec_ref = models.VolumeTypeExtraSpecs() spec_ref.update({"key": key, "value": value, "volume_type_id": volume_type_id, "deleted": 0}) spec_ref.save(session=session) return specs #################### def s3_image_get(context, image_id): """Find local s3 image represented by the provided id""" result = model_query(context, models.S3Image, read_deleted="yes").\ filter_by(id=image_id).\ first() if not result: raise 
exception.ImageNotFound(image_id=image_id) return result def s3_image_get_by_uuid(context, image_uuid): """Find local s3 image represented by the provided uuid""" result = model_query(context, models.S3Image, read_deleted="yes").\ filter_by(uuid=image_uuid).\ first() if not result: raise exception.ImageNotFound(image_id=image_uuid) return result def s3_image_create(context, image_uuid): """Create local s3 image represented by provided uuid""" try: s3_image_ref = models.S3Image() s3_image_ref.update({'uuid': image_uuid}) s3_image_ref.save() except Exception, e: raise exception.DBError(e) return s3_image_ref #################### @require_admin_context def sm_backend_conf_create(context, values): backend_conf = models.SMBackendConf() backend_conf.update(values) backend_conf.save() return backend_conf @require_admin_context def sm_backend_conf_update(context, sm_backend_id, values): session = get_session() with session.begin(): backend_conf = model_query(context, models.SMBackendConf, session=session, read_deleted="yes").\ filter_by(id=sm_backend_id).\ first() if not backend_conf: raise exception.NotFound( _("No backend config with id %(sm_backend_id)s") % locals()) backend_conf.update(values) backend_conf.save(session=session) return backend_conf @require_admin_context def sm_backend_conf_delete(context, sm_backend_id): # FIXME(sirp): for consistency, shouldn't this just mark as deleted with # `purge` actually deleting the record? session = get_session() with session.begin(): model_query(context, models.SMBackendConf, session=session, read_deleted="yes").\ filter_by(id=sm_backend_id).\ delete() @require_admin_context def sm_backend_conf_get(context, sm_backend_id): result = model_query(context, models.SMBackendConf, read_deleted="yes").\ filter_by(id=sm_backend_id).\ first() if not result: raise exception.NotFound(_("No backend config with id " "%(sm_backend_id)s") % locals()) return result @require_admin_context def sm_backend_conf_get_by_sr(context, sr_uuid): session = get_session() return model_query(context, models.SMBackendConf, read_deleted="yes").\ filter_by(sr_uuid=sr_uuid).\ first() @require_admin_context def sm_backend_conf_get_all(context): return model_query(context, models.SMBackendConf, read_deleted="yes").\ all() #################### def _sm_flavor_get_query(context, sm_flavor_label, session=None): return model_query(context, models.SMFlavors, session=session, read_deleted="yes").\ filter_by(label=sm_flavor_label) @require_admin_context def sm_flavor_create(context, values): sm_flavor = models.SMFlavors() sm_flavor.update(values) sm_flavor.save() return sm_flavor @require_admin_context def sm_flavor_update(context, sm_flavor_label, values): sm_flavor = sm_flavor_get(context, sm_flavor_label) sm_flavor.update(values) sm_flavor.save() return sm_flavor @require_admin_context def sm_flavor_delete(context, sm_flavor_label): session = get_session() with session.begin(): _sm_flavor_get_query(context, sm_flavor_label).delete() @require_admin_context def sm_flavor_get(context, sm_flavor_label): result = _sm_flavor_get_query(context, sm_flavor_label).first() if not result: raise exception.NotFound( _("No sm_flavor called %(sm_flavor)s") % locals()) return result @require_admin_context def sm_flavor_get_all(context): return model_query(context, models.SMFlavors, read_deleted="yes").all() ############################### def _sm_volume_get_query(context, volume_id, session=None): return model_query(context, models.SMVolume, session=session, read_deleted="yes").\ filter_by(id=volume_id) def 
sm_volume_create(context, values): sm_volume = models.SMVolume() sm_volume.update(values) sm_volume.save() return sm_volume def sm_volume_update(context, volume_id, values): sm_volume = sm_volume_get(context, volume_id) sm_volume.update(values) sm_volume.save() return sm_volume def sm_volume_delete(context, volume_id): session = get_session() with session.begin(): _sm_volume_get_query(context, volume_id, session=session).delete() def sm_volume_get(context, volume_id): result = _sm_volume_get_query(context, volume_id).first() if not result: raise exception.NotFound( _("No sm_volume with id %(volume_id)s") % locals()) return result def sm_volume_get_all(context): return model_query(context, models.SMVolume, read_deleted="yes").all() ################ def _aggregate_get_query(context, model_class, id_field, id, session=None, read_deleted='yes'): return model_query(context, model_class, session=session, read_deleted=read_deleted).filter(id_field == id) @require_admin_context def aggregate_create(context, values, metadata=None): session = get_session() aggregate = _aggregate_get_query(context, models.Aggregate, models.Aggregate.name, values['name'], session=session, read_deleted='yes').first() values.setdefault('operational_state', aggregate_states.CREATED) if not aggregate: aggregate = models.Aggregate() aggregate.update(values) aggregate.save(session=session) elif aggregate.deleted: values['deleted'] = False values['deleted_at'] = None aggregate.update(values) aggregate.save(session=session) else: raise exception.AggregateNameExists(aggregate_name=values['name']) if metadata: aggregate_metadata_add(context, aggregate.id, metadata) return aggregate @require_admin_context def aggregate_get(context, aggregate_id, read_deleted='no'): aggregate = _aggregate_get_query(context, models.Aggregate, models.Aggregate.id, aggregate_id, read_deleted=read_deleted).first() if not aggregate: raise exception.AggregateNotFound(aggregate_id=aggregate_id) return aggregate @require_admin_context def aggregate_get_by_host(context, host, read_deleted='no'): aggregate_host = _aggregate_get_query(context, models.AggregateHost, models.AggregateHost.host, host, read_deleted='no').first() if not aggregate_host: raise exception.AggregateHostNotFound(host=host) return aggregate_get(context, aggregate_host.aggregate_id, read_deleted) @require_admin_context def aggregate_update(context, aggregate_id, values): session = get_session() aggregate = _aggregate_get_query(context, models.Aggregate, models.Aggregate.id, aggregate_id, session=session, read_deleted='no').first() if aggregate: metadata = values.get('metadata') if metadata is not None: aggregate_metadata_add(context, aggregate_id, values.pop('metadata'), set_delete=True) with session.begin(): aggregate.update(values) aggregate.save(session=session) values['metadata'] = metadata return aggregate else: raise exception.AggregateNotFound(aggregate_id=aggregate_id) @require_admin_context def aggregate_delete(context, aggregate_id): query = _aggregate_get_query(context, models.Aggregate, models.Aggregate.id, aggregate_id, read_deleted='no') if query.first(): query.update({'deleted': True, 'deleted_at': utils.utcnow(), 'operational_state': aggregate_states.DISMISSED, 'updated_at': literal_column('updated_at')}) else: raise exception.AggregateNotFound(aggregate_id=aggregate_id) @require_admin_context def aggregate_get_all(context, read_deleted='yes'): return model_query(context, models.Aggregate, read_deleted=read_deleted).all() @require_admin_context @require_aggregate_exists 
def aggregate_metadata_get(context, aggregate_id, read_deleted='no'): rows = model_query(context, models.AggregateMetadata, read_deleted=read_deleted).\ filter_by(aggregate_id=aggregate_id).all() return dict([(r['key'], r['value']) for r in rows]) @require_admin_context @require_aggregate_exists def aggregate_metadata_delete(context, aggregate_id, key): query = _aggregate_get_query(context, models.AggregateMetadata, models.AggregateMetadata.aggregate_id, aggregate_id, read_deleted='no').\ filter_by(key=key) if query.first(): query.update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) else: raise exception.AggregateMetadataNotFound(aggregate_id=aggregate_id, metadata_key=key) @require_admin_context @require_aggregate_exists def aggregate_metadata_get_item(context, aggregate_id, key, session=None, read_deleted='yes'): result = _aggregate_get_query(context, models.AggregateMetadata, models.AggregateMetadata.aggregate_id, aggregate_id, session=session, read_deleted=read_deleted).\ filter_by(key=key).first() if not result: raise exception.AggregateMetadataNotFound(metadata_key=key, aggregate_id=aggregate_id) return result @require_admin_context @require_aggregate_exists def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False): session = get_session() if set_delete: original_metadata = aggregate_metadata_get(context, aggregate_id) for meta_key, meta_value in original_metadata.iteritems(): if meta_key not in metadata: meta_ref = aggregate_metadata_get_item(context, aggregate_id, meta_key, session) meta_ref.update({'deleted': True}) meta_ref.save(session=session) meta_ref = None for meta_key, meta_value in metadata.iteritems(): item = {"value": meta_value} try: meta_ref = aggregate_metadata_get_item(context, aggregate_id, meta_key, session) if meta_ref.deleted: item.update({'deleted': False, 'deleted_at': None}) except exception.AggregateMetadataNotFound: meta_ref = models.AggregateMetadata() item.update({"key": meta_key, "aggregate_id": aggregate_id}) meta_ref.update(item) meta_ref.save(session=session) return metadata @require_admin_context @require_aggregate_exists def aggregate_host_get_all(context, aggregate_id, read_deleted='yes'): rows = model_query(context, models.AggregateHost, read_deleted=read_deleted).\ filter_by(aggregate_id=aggregate_id).all() return [r.host for r in rows] @require_admin_context @require_aggregate_exists def aggregate_host_delete(context, aggregate_id, host): query = _aggregate_get_query(context, models.AggregateHost, models.AggregateHost.aggregate_id, aggregate_id, read_deleted='no').filter_by(host=host) if query.first(): query.update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) else: raise exception.AggregateHostNotFound(aggregate_id=aggregate_id, host=host) @require_admin_context @require_aggregate_exists def aggregate_host_add(context, aggregate_id, host): session = get_session() host_ref = _aggregate_get_query(context, models.AggregateHost, models.AggregateHost.aggregate_id, aggregate_id, session=session, read_deleted='yes').\ filter_by(host=host).first() if not host_ref: try: host_ref = models.AggregateHost() values = {"host": host, "aggregate_id": aggregate_id, } host_ref.update(values) host_ref.save(session=session) except exception.DBError: raise exception.AggregateHostConflict(host=host) elif host_ref.deleted: host_ref.update({'deleted': False, 'deleted_at': None}) host_ref.save(session=session) else: raise 
exception.AggregateHostExists(host=host, aggregate_id=aggregate_id) return host_ref ################ def instance_fault_create(context, values): """Create a new InstanceFault.""" fault_ref = models.InstanceFault() fault_ref.update(values) fault_ref.save() return dict(fault_ref.iteritems()) def instance_fault_get_by_instance_uuids(context, instance_uuids): """Get all instance faults for the provided instance_uuids.""" rows = model_query(context, models.InstanceFault, read_deleted='no').\ filter(models.InstanceFault.instance_uuid.in_( instance_uuids)).\ order_by(desc("created_at")).\ all() output = {} for instance_uuid in instance_uuids: output[instance_uuid] = [] for row in rows: data = dict(row.iteritems()) output[row['instance_uuid']].append(data) return output
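

# ---------------------------------------------------------------------------
# Usage sketch (not part of the public API; the context object and ids are
# hypothetical). The *_metadata update helpers above all share one upsert
# contract:
#
#     volume_metadata_update(ctxt, 1, {'purpose': 'scratch'}, delete=False)
#         # creates or updates the 'purpose' key, leaves other keys alone
#     volume_metadata_update(ctxt, 1, {'purpose': 'scratch'}, delete=True)
#         # additionally soft-deletes every key absent from the new dict
#
# instance_metadata_update() behaves the same way, and aggregate_metadata_add()
# does the same under its set_delete flag.
# ---------------------------------------------------------------------------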
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
#
# Copyright 2011, Piston Cloud Computing, Inc.
#
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""
Utility methods to resize, repartition, and modify disk images.

Includes injection of SSH public keys into the authorized_keys file.

"""

import crypt
import json
import os
import random
import re
import tempfile

from nova import exception
from nova import flags
from nova import log as logging
from nova.openstack.common import cfg
from nova import utils
from nova.virt.disk import guestfs
from nova.virt.disk import loop
from nova.virt.disk import nbd

LOG = logging.getLogger(__name__)

disk_opts = [
    cfg.StrOpt('injected_network_template',
               default='$pybasedir/nova/virt/interfaces.template',
               help='Template file for injected network'),
    cfg.ListOpt('img_handlers',
                default=['loop', 'nbd', 'guestfs'],
                help='Order of methods used to mount disk images'),

    # NOTE(yamahata): ListOpt won't work because the command may include a
    #                 comma. For example:
    #
    #                 mkfs.ext3 -O dir_index,extent -E stride=8,stripe-width=16
    #                           --label %(fs_label)s %(target)s
    #
    #                 list arguments are comma separated and there is no way
    #                 to escape such commas.
    #
    cfg.MultiStrOpt('virt_mkfs',
                    default=[
                        'default=mkfs.ext3 -L %(fs_label)s -F %(target)s',
                        'linux=mkfs.ext3 -L %(fs_label)s -F %(target)s',
                        'windows=mkfs.ntfs'
                        ' --force --fast --label %(fs_label)s %(target)s',
                        # NOTE(yamahata): vfat case
                        #'windows=mkfs.vfat -n %(fs_label)s %(target)s',
                    ],
                    help='mkfs commands for ephemeral device. '
                         'The format is <os_type>=<mkfs command>'),
]

FLAGS = flags.FLAGS
FLAGS.register_opts(disk_opts)

_MKFS_COMMAND = {}
_DEFAULT_MKFS_COMMAND = None


for s in FLAGS.virt_mkfs:
    # NOTE(yamahata): mkfs command may include '=' for its options.
    #                 So item.partition('=') doesn't work here
    os_type, mkfs_command = s.split('=', 1)
    if os_type:
        _MKFS_COMMAND[os_type] = mkfs_command
        if os_type == 'default':
            _DEFAULT_MKFS_COMMAND = mkfs_command


_QEMU_VIRT_SIZE_REGEX = re.compile('^virtual size: (.*) \(([0-9]+) bytes\)',
                                   re.MULTILINE)


def mkfs(os_type, fs_label, target):
    mkfs_command = (_MKFS_COMMAND.get(os_type, _DEFAULT_MKFS_COMMAND) or
                    '') % locals()
    if mkfs_command:
        utils.execute(*mkfs_command.split())


def get_image_virtual_size(image):
    out, _err = utils.execute('qemu-img', 'info', image)
    m = _QEMU_VIRT_SIZE_REGEX.search(out)
    return int(m.group(2))


def extend(image, size):
    """Increase image to size"""
    # NOTE(MotoKen): check image virtual size before resize
    virt_size = get_image_virtual_size(image)
    if virt_size >= size:
        return
    utils.execute('qemu-img', 'resize', image, size)
    # NOTE(vish): attempts to resize filesystem
    utils.execute('e2fsck', '-fp', image, check_exit_code=False)
    utils.execute('resize2fs', image, check_exit_code=False)


def bind(src, target, instance_name):
    """Bind device to a filesystem"""
    if src:
        utils.execute('touch', target, run_as_root=True)
        utils.execute('mount', '-o', 'bind', src, target,
                      run_as_root=True)
        s = os.stat(src)
        cgroup_info = "b %s:%s rwm\n" % (os.major(s.st_rdev),
                                         os.minor(s.st_rdev))
        cgroups_path = \
            "/sys/fs/cgroup/devices/libvirt/lxc/%s/devices.allow" \
            % instance_name
        utils.execute('tee', cgroups_path,
                      process_input=cgroup_info, run_as_root=True)


def unbind(target):
    if target:
        utils.execute('umount', target, run_as_root=True)


class _DiskImage(object):
    """Provide operations on a disk image file."""

    def __init__(self, image, partition=None, use_cow=False, mount_dir=None):
        # These are passed to each mounter
        self.image = image
        self.partition = partition
        self.mount_dir = mount_dir

        # Internal
        self._mkdir = False
        self._mounter = None
        self._errors = []

        # As a performance tweak, don't bother trying to
        # directly loopback mount a cow image.
        self.handlers = FLAGS.img_handlers[:]
        if use_cow and 'loop' in self.handlers:
            self.handlers.remove('loop')

        if not self.handlers:
            raise exception.Error(_('no capable image handler configured'))

    @property
    def errors(self):
        """Return the collated errors from all operations."""
        return '\n--\n'.join([''] + self._errors)

    @staticmethod
    def _handler_class(mode):
        """Look up the appropriate class to use based on MODE."""
        for cls in (loop.Mount, nbd.Mount, guestfs.Mount):
            if cls.mode == mode:
                return cls
        raise exception.Error(_("unknown disk image handler: %s") % mode)

    def mount(self):
        """Mount a disk image, using the object attributes.

        The first supported means provided by the mount classes
        is used.

        True or False is returned and the 'errors' attribute
        contains any diagnostics.
""" if self._mounter: raise exception.Error(_('image already mounted')) if not self.mount_dir: self.mount_dir = tempfile.mkdtemp() self._mkdir = True try: for h in self.handlers: mounter_cls = self._handler_class(h) mounter = mounter_cls(image=self.image, partition=self.partition, mount_dir=self.mount_dir) if mounter.do_mount(): self._mounter = mounter break else: LOG.debug(mounter.error) self._errors.append(mounter.error) finally: if not self._mounter: self.umount() # rmdir return bool(self._mounter) def umount(self): """Unmount a disk image from the file system.""" try: if self._mounter: self._mounter.do_umount() finally: if self._mkdir: os.rmdir(self.mount_dir) # Public module functions def inject_data(image, key=None, net=None, metadata=None, admin_password=None, partition=None, use_cow=False): """Injects a ssh key and optionally net data into a disk image. it will mount the image as a fully partitioned disk and attempt to inject into the specified partition number. If partition is not specified it mounts the image as a single partition. """ img = _DiskImage(image=image, partition=partition, use_cow=use_cow) if img.mount(): try: inject_data_into_fs(img.mount_dir, key, net, metadata, admin_password, utils.execute) finally: img.umount() else: raise exception.Error(img.errors) def inject_files(image, files, partition=None, use_cow=False): """Injects arbitrary files into a disk image""" img = _DiskImage(image=image, partition=partition, use_cow=use_cow) if img.mount(): try: for (path, contents) in files: _inject_file_into_fs(img.mount_dir, path, contents) finally: img.umount() else: raise exception.Error(img.errors) def setup_container(image, container_dir=None, use_cow=False): """Setup the LXC container. It will mount the loopback image to the container directory in order to create the root filesystem for the container. LXC does not support qcow2 images yet. """ try: img = _DiskImage(image=image, use_cow=use_cow, mount_dir=container_dir) if img.mount(): return img else: raise exception.Error(img.errors) except Exception, exn: LOG.exception(_('Failed to mount filesystem: %s'), exn) def destroy_container(img): """Destroy the container once it terminates. It will umount the container that is mounted, and delete any linked devices. LXC does not support qcow2 images yet. """ try: if img: img.umount() except Exception, exn: LOG.exception(_('Failed to remove container: %s'), exn) def inject_data_into_fs(fs, key, net, metadata, admin_password, execute): """Injects data into a filesystem already mounted by the caller. Virt connections can call this directly if they mount their fs in a different way to inject_data """ if key: _inject_key_into_fs(key, fs, execute=execute) if net: _inject_net_into_fs(net, fs, execute=execute) if metadata: _inject_metadata_into_fs(metadata, fs, execute=execute) if admin_password: _inject_admin_password_into_fs(admin_password, fs, execute=execute) def _join_and_check_path_within_fs(fs, *args): '''os.path.join() with safety check for injected file paths. Join the supplied path components and make sure that the resulting path we are injecting into is within the mounted guest fs. Trying to be clever and specifying a path with '..' in it will hit this safeguard. 
''' absolute_path = os.path.realpath(os.path.join(fs, *args)) if not absolute_path.startswith(os.path.realpath(fs) + '/'): raise exception.Invalid(_('injected file path not valid')) return absolute_path def _inject_file_into_fs(fs, path, contents, append=False): absolute_path = _join_and_check_path_within_fs(fs, path.lstrip('/')) parent_dir = os.path.dirname(absolute_path) utils.execute('mkdir', '-p', parent_dir, run_as_root=True) args = [] if append: args.append('-a') args.append(absolute_path) kwargs = dict(process_input=contents, run_as_root=True) utils.execute('tee', *args, **kwargs) def _inject_metadata_into_fs(metadata, fs, execute=None): metadata = dict([(m.key, m.value) for m in metadata]) _inject_file_into_fs(fs, 'meta.js', json.dumps(metadata)) def _inject_key_into_fs(key, fs, execute=None): """Add the given public ssh key to root's authorized_keys. key is an ssh key string. fs is the path to the base of the filesystem into which to inject the key. """ sshdir = _join_and_check_path_within_fs(fs, 'root', '.ssh') utils.execute('mkdir', '-p', sshdir, run_as_root=True) utils.execute('chown', 'root', sshdir, run_as_root=True) utils.execute('chmod', '700', sshdir, run_as_root=True) keyfile = os.path.join('root', '.ssh', 'authorized_keys') key_data = ''.join([ '\n', '# The following ssh key was injected by Nova', '\n', key.strip(), '\n', ]) _inject_file_into_fs(fs, keyfile, key_data, append=True) def _inject_net_into_fs(net, fs, execute=None): """Inject /etc/network/interfaces into the filesystem rooted at fs. net is the contents of /etc/network/interfaces. """ netdir = _join_and_check_path_within_fs(fs, 'etc', 'network') utils.execute('mkdir', '-p', netdir, run_as_root=True) utils.execute('chown', 'root:root', netdir, run_as_root=True) utils.execute('chmod', '755', netdir, run_as_root=True) netfile = os.path.join('etc', 'network', 'interfaces') _inject_file_into_fs(fs, netfile, net) def _inject_admin_password_into_fs(admin_passwd, fs, execute=None): """Set the root password to admin_passwd admin_passwd is the plaintext root password. fs is the path to the base of the filesystem into which to inject the password. This method modifies the instance filesystem directly, and does not require a guest agent running in the instance. """ # The approach used here is to copy the password and shadow # files from the instance filesystem to local files, make any # necessary changes, and then copy them back. admin_user = 'root' fd, tmp_passwd = tempfile.mkstemp() os.close(fd) fd, tmp_shadow = tempfile.mkstemp() os.close(fd) passwd_path = _join_and_check_path_within_fs(fs, 'etc', 'passwd') shadow_path = _join_and_check_path_within_fs(fs, 'etc', 'shadow') utils.execute('cp', passwd_path, tmp_passwd, run_as_root=True) utils.execute('cp', shadow_path, tmp_shadow, run_as_root=True) _set_passwd(admin_user, admin_passwd, tmp_passwd, tmp_shadow) utils.execute('cp', tmp_passwd, passwd_path, run_as_root=True) os.unlink(tmp_passwd) utils.execute('cp', tmp_shadow, shadow_path, run_as_root=True) os.unlink(tmp_shadow) def _set_passwd(username, admin_passwd, passwd_file, shadow_file): """Set the password for username to admin_passwd. The passwd_file is not modified. The shadow_file is updated. If the username is not found in either file, an exception is raised.
:param username: the username :param admin_passwd: the plaintext admin password :param passwd_file: path to the passwd file :param shadow_file: path to the shadow password file :returns: nothing :raises: exception.Error(), IOError() """ salt_set = ('abcdefghijklmnopqrstuvwxyz' 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' '0123456789./') # encryption algo - id pairs for crypt() algos = {'SHA-512': '$6$', 'SHA-256': '$5$', 'MD5': '$1$', 'DES': ''} salt = ''.join([random.choice(salt_set) for _ in range(16)]) # crypt() depends on the underlying libc, and may not support all # forms of hash. We try md5 first. If we get only 13 characters back, # then the underlying crypt() didn't understand the '$n$salt' magic, # so we fall back to DES. # md5 is the default because it's widely supported. Although the # local crypt() might support stronger SHA, the target instance # might not. encrypted_passwd = crypt.crypt(admin_passwd, algos['MD5'] + salt) if len(encrypted_passwd) == 13: encrypted_passwd = crypt.crypt(admin_passwd, algos['DES'] + salt) try: p_file = open(passwd_file, 'rb') s_file = open(shadow_file, 'rb') # username MUST exist in passwd file or it's an error found = False for entry in p_file: split_entry = entry.split(':') if split_entry[0] == username: found = True break if not found: msg = _('User %(username)s not found in password file.') raise exception.Error(msg % {'username': username}) # Update the password in the shadow file. It's an error if the # user doesn't exist. new_shadow = list() found = False for entry in s_file: split_entry = entry.split(':') if split_entry[0] == username: split_entry[1] = encrypted_passwd found = True new_entry = ':'.join(split_entry) new_shadow.append(new_entry) s_file.close() if not found: msg = _('User %(username)s not found in shadow file.') raise exception.Error(msg % {'username': username}) s_file = open(shadow_file, 'wb') for entry in new_shadow: s_file.write(entry) finally: p_file.close() s_file.close()
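# Illustrative note (not part of the original module): the reason
# _join_and_check_path_within_fs() above defeats path traversal is that
# os.path.realpath() collapses any '..' components *before* the prefix
# check runs. A minimal sketch, assuming a hypothetical mount point
# '/mnt/guest' (no symlinks involved):
#
#     >>> import os.path
#     >>> fs = '/mnt/guest'
#     >>> p = os.path.realpath(os.path.join(fs, '../../etc/shadow'))
#     >>> p.startswith(os.path.realpath(fs) + '/')
#     False
#
# The join resolves to '/etc/shadow' on the host, fails the startswith()
# test, and exception.Invalid is raised before anything is written.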
./CrossVul/dataset_final_sorted/CWE-264/py/bad_3698_3
crossvul-python_data_good_3634_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Cloud Controller: Implementation of EC2 REST API calls, which are dispatched to other nodes via AMQP RPC. State is via distributed datastore. """ import base64 import re import time import urllib from nova.api.ec2 import ec2utils from nova.api.ec2 import inst_state from nova.api import validator from nova import block_device from nova import compute from nova.compute import instance_types from nova.compute import vm_states from nova import crypto from nova import db from nova import exception from nova import flags from nova.image import s3 from nova import log as logging from nova import network from nova.rpc import common as rpc_common from nova import quota from nova import utils from nova import volume FLAGS = flags.FLAGS flags.DECLARE('dhcp_domain', 'nova.network.manager') LOG = logging.getLogger(__name__) def validate_ec2_id(val): if not validator.validate_str()(val): raise exception.InvalidInstanceIDMalformed(val) try: ec2utils.ec2_id_to_id(val) except exception.InvalidEc2Id: raise exception.InvalidInstanceIDMalformed(val) def _gen_key(context, user_id, key_name): """Generate a key This is a module level method because it is slow and we need to defer it into a process pool.""" # NOTE(vish): generating key pair is slow so check for legal # creation before creating key_pair try: db.key_pair_get(context, user_id, key_name) raise exception.KeyPairExists(key_name=key_name) except exception.NotFound: pass private_key, public_key, fingerprint = crypto.generate_key_pair() key = {} key['user_id'] = user_id key['name'] = key_name key['public_key'] = public_key key['fingerprint'] = fingerprint db.key_pair_create(context, key) return {'private_key': private_key, 'fingerprint': fingerprint} # EC2 API can return the following values as documented in the EC2 API # http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ # ApiReference-ItemType-InstanceStateType.html # pending 0 | running 16 | shutting-down 32 | terminated 48 | stopping 64 | # stopped 80 _STATE_DESCRIPTION_MAP = { None: inst_state.PENDING, vm_states.ACTIVE: inst_state.RUNNING, vm_states.BUILDING: inst_state.PENDING, vm_states.REBUILDING: inst_state.PENDING, vm_states.DELETED: inst_state.TERMINATED, vm_states.SOFT_DELETE: inst_state.TERMINATED, vm_states.STOPPED: inst_state.STOPPED, vm_states.SHUTOFF: inst_state.SHUTOFF, vm_states.MIGRATING: inst_state.MIGRATE, vm_states.RESIZING: inst_state.RESIZE, vm_states.PAUSED: inst_state.PAUSE, vm_states.SUSPENDED: inst_state.SUSPEND, vm_states.RESCUED: inst_state.RESCUE, } def _state_description(vm_state, shutdown_terminate): """Map the vm state to the server status string""" if (vm_state == vm_states.SHUTOFF and not shutdown_terminate): name = inst_state.STOPPED else: name = _STATE_DESCRIPTION_MAP.get(vm_state, vm_state) return {'code': 
inst_state.name_to_code(name), 'name': name} def _parse_block_device_mapping(bdm): """Parse BlockDeviceMappingItemType into flat hash BlockDeviceMapping.<N>.DeviceName BlockDeviceMapping.<N>.Ebs.SnapshotId BlockDeviceMapping.<N>.Ebs.VolumeSize BlockDeviceMapping.<N>.Ebs.DeleteOnTermination BlockDeviceMapping.<N>.Ebs.NoDevice BlockDeviceMapping.<N>.VirtualName => remove .Ebs and allow volume id in SnapshotId """ ebs = bdm.pop('ebs', None) if ebs: ec2_id = ebs.pop('snapshot_id', None) if ec2_id: internal_id = ec2utils.ec2_id_to_id(ec2_id) if ec2_id.startswith('snap-'): bdm['snapshot_id'] = internal_id elif ec2_id.startswith('vol-'): bdm['volume_id'] = internal_id ebs.setdefault('delete_on_termination', True) bdm.update(ebs) return bdm def _properties_get_mappings(properties): return block_device.mappings_prepend_dev(properties.get('mappings', [])) def _format_block_device_mapping(bdm): """Construct BlockDeviceMappingItemType {'device_name': '...', 'snapshot_id': , ...} => BlockDeviceMappingItemType """ keys = (('deviceName', 'device_name'), ('virtualName', 'virtual_name')) item = {} for name, k in keys: if k in bdm: item[name] = bdm[k] if bdm.get('no_device'): item['noDevice'] = True if ('snapshot_id' in bdm) or ('volume_id' in bdm): ebs_keys = (('snapshotId', 'snapshot_id'), ('snapshotId', 'volume_id'), # snapshotId is abused ('volumeSize', 'volume_size'), ('deleteOnTermination', 'delete_on_termination')) ebs = {} for name, k in ebs_keys: if k in bdm: if k == 'snapshot_id': ebs[name] = ec2utils.id_to_ec2_snap_id(bdm[k]) elif k == 'volume_id': ebs[name] = ec2utils.id_to_ec2_vol_id(bdm[k]) else: ebs[name] = bdm[k] assert 'snapshotId' in ebs item['ebs'] = ebs return item def _format_mappings(properties, result): """Format multiple BlockDeviceMappingItemType""" mappings = [{'virtualName': m['virtual'], 'deviceName': m['device']} for m in _properties_get_mappings(properties) if block_device.is_swap_or_ephemeral(m['virtual'])] block_device_mapping = [_format_block_device_mapping(bdm) for bdm in properties.get('block_device_mapping', [])] # NOTE(yamahata): overwrite mappings with block_device_mapping for bdm in block_device_mapping: for i in range(len(mappings)): if bdm['deviceName'] == mappings[i]['deviceName']: del mappings[i] break mappings.append(bdm) # NOTE(yamahata): trim ebs.no_device == true. Is this necessary? mappings = [bdm for bdm in mappings if not (bdm.get('noDevice', False))] if mappings: result['blockDeviceMapping'] = mappings class CloudController(object): """ CloudController provides the critical dispatch between inbound API calls through the endpoint and messages sent to the other nodes.
""" def __init__(self): self.image_service = s3.S3ImageService() self.network_api = network.API() self.volume_api = volume.API() self.compute_api = compute.API(network_api=self.network_api, volume_api=self.volume_api) self.sgh = utils.import_object(FLAGS.security_group_handler) def __str__(self): return 'CloudController' def _get_image_state(self, image): # NOTE(vish): fallback status if image_state isn't set state = image.get('status') if state == 'active': state = 'available' return image['properties'].get('image_state', state) def describe_availability_zones(self, context, **kwargs): if ('zone_name' in kwargs and 'verbose' in kwargs['zone_name'] and context.is_admin): return self._describe_availability_zones_verbose(context, **kwargs) else: return self._describe_availability_zones(context, **kwargs) def _describe_availability_zones(self, context, **kwargs): ctxt = context.elevated() enabled_services = db.service_get_all(ctxt, False) disabled_services = db.service_get_all(ctxt, True) available_zones = [] for zone in [service.availability_zone for service in enabled_services]: if not zone in available_zones: available_zones.append(zone) not_available_zones = [] for zone in [service.availability_zone for service in disabled_services if not service['availability_zone'] in available_zones]: if not zone in not_available_zones: not_available_zones.append(zone) result = [] for zone in available_zones: result.append({'zoneName': zone, 'zoneState': "available"}) for zone in not_available_zones: result.append({'zoneName': zone, 'zoneState': "not available"}) return {'availabilityZoneInfo': result} def _describe_availability_zones_verbose(self, context, **kwargs): rv = {'availabilityZoneInfo': [{'zoneName': 'nova', 'zoneState': 'available'}]} services = db.service_get_all(context, False) hosts = [] for host in [service['host'] for service in services]: if not host in hosts: hosts.append(host) for host in hosts: rv['availabilityZoneInfo'].append({'zoneName': '|- %s' % host, 'zoneState': ''}) hsvcs = [service for service in services if service['host'] == host] for svc in hsvcs: alive = utils.service_is_up(svc) art = (alive and ":-)") or "XXX" active = 'enabled' if svc['disabled']: active = 'disabled' rv['availabilityZoneInfo'].append({ 'zoneName': '| |- %s' % svc['binary'], 'zoneState': '%s %s %s' % (active, art, svc['updated_at'])}) return rv def describe_regions(self, context, region_name=None, **kwargs): if FLAGS.region_list: regions = [] for region in FLAGS.region_list: name, _sep, host = region.partition('=') endpoint = '%s://%s:%s%s' % (FLAGS.ec2_scheme, host, FLAGS.ec2_port, FLAGS.ec2_path) regions.append({'regionName': name, 'regionEndpoint': endpoint}) else: regions = [{'regionName': 'nova', 'regionEndpoint': '%s://%s:%s%s' % (FLAGS.ec2_scheme, FLAGS.ec2_host, FLAGS.ec2_port, FLAGS.ec2_path)}] return {'regionInfo': regions} def describe_snapshots(self, context, snapshot_id=None, owner=None, restorable_by=None, **kwargs): if snapshot_id: snapshots = [] for ec2_id in snapshot_id: internal_id = ec2utils.ec2_id_to_id(ec2_id) snapshot = self.volume_api.get_snapshot( context, snapshot_id=internal_id) snapshots.append(snapshot) else: snapshots = self.volume_api.get_all_snapshots(context) snapshots = [self._format_snapshot(context, s) for s in snapshots] return {'snapshotSet': snapshots} def _format_snapshot(self, context, snapshot): s = {} s['snapshotId'] = ec2utils.id_to_ec2_snap_id(snapshot['id']) s['volumeId'] = ec2utils.id_to_ec2_vol_id(snapshot['volume_id']) s['status'] = snapshot['status'] 
s['startTime'] = snapshot['created_at'] s['progress'] = snapshot['progress'] s['ownerId'] = snapshot['project_id'] s['volumeSize'] = snapshot['volume_size'] s['description'] = snapshot['display_description'] return s def create_snapshot(self, context, volume_id, **kwargs): validate_ec2_id(volume_id) LOG.audit(_("Create snapshot of volume %s"), volume_id, context=context) volume_id = ec2utils.ec2_id_to_id(volume_id) volume = self.volume_api.get(context, volume_id) snapshot = self.volume_api.create_snapshot( context, volume, None, kwargs.get('description')) return self._format_snapshot(context, snapshot) def delete_snapshot(self, context, snapshot_id, **kwargs): snapshot_id = ec2utils.ec2_id_to_id(snapshot_id) snapshot = self.volume_api.get_snapshot(context, snapshot_id) self.volume_api.delete_snapshot(context, snapshot) return True def describe_key_pairs(self, context, key_name=None, **kwargs): key_pairs = db.key_pair_get_all_by_user(context, context.user_id) if key_name is not None: key_pairs = [x for x in key_pairs if x['name'] in key_name] result = [] for key_pair in key_pairs: # filter out the vpn keys suffix = FLAGS.vpn_key_suffix if context.is_admin or not key_pair['name'].endswith(suffix): result.append({ 'keyName': key_pair['name'], 'keyFingerprint': key_pair['fingerprint'], }) return {'keySet': result} def create_key_pair(self, context, key_name, **kwargs): if not re.match('^[a-zA-Z0-9_\- ]+$', str(key_name)): err = _("Value (%s) for KeyName is invalid." " Content limited to alphanumeric characters, " "spaces, dashes, and underscores.") % key_name raise exception.EC2APIError(err) if len(str(key_name)) > 255: err = _("Value (%s) for KeyName is invalid." " Length exceeds maximum of 255.") % key_name raise exception.EC2APIError(err) LOG.audit(_("Create key pair %s"), key_name, context=context) data = _gen_key(context, context.user_id, key_name) return {'keyName': key_name, 'keyFingerprint': data['fingerprint'], 'keyMaterial': data['private_key']} # TODO(vish): when context is no longer an object, pass it here def import_key_pair(self, context, key_name, public_key_material, **kwargs): LOG.audit(_("Import key %s"), key_name, context=context) try: db.key_pair_get(context, context.user_id, key_name) raise exception.KeyPairExists(key_name=key_name) except exception.NotFound: pass public_key = base64.b64decode(public_key_material) fingerprint = crypto.generate_fingerprint(public_key) key = {} key['user_id'] = context.user_id key['name'] = key_name key['public_key'] = public_key key['fingerprint'] = fingerprint db.key_pair_create(context, key) return {'keyName': key_name, 'keyFingerprint': fingerprint} def delete_key_pair(self, context, key_name, **kwargs): LOG.audit(_("Delete key pair %s"), key_name, context=context) try: db.key_pair_destroy(context, context.user_id, key_name) except exception.NotFound: # aws returns true even if the key doesn't exist pass return True def describe_security_groups(self, context, group_name=None, group_id=None, **kwargs): self.compute_api.ensure_default_security_group(context) if group_name or group_id: groups = [] if group_name: for name in group_name: group = db.security_group_get_by_name(context, context.project_id, name) groups.append(group) if group_id: for gid in group_id: group = db.security_group_get(context, gid) groups.append(group) elif context.is_admin: groups = db.security_group_get_all(context) else: groups = db.security_group_get_by_project(context, context.project_id) groups = [self._format_security_group(context, g) for g in groups] return
{'securityGroupInfo': list(sorted(groups, key=lambda k: (k['ownerId'], k['groupName'])))} def _format_security_group(self, context, group): g = {} g['groupDescription'] = group.description g['groupName'] = group.name g['ownerId'] = group.project_id g['ipPermissions'] = [] for rule in group.rules: r = {} r['groups'] = [] r['ipRanges'] = [] if rule.group_id: source_group = db.security_group_get(context, rule.group_id) r['groups'] += [{'groupName': source_group.name, 'userId': source_group.project_id}] if rule.protocol: r['ipProtocol'] = rule.protocol r['fromPort'] = rule.from_port r['toPort'] = rule.to_port g['ipPermissions'] += [dict(r)] else: for protocol, min_port, max_port in (('icmp', -1, -1), ('tcp', 1, 65535), ('udp', 1, 65535)): r['ipProtocol'] = protocol r['fromPort'] = min_port r['toPort'] = max_port g['ipPermissions'] += [dict(r)] else: r['ipProtocol'] = rule.protocol r['fromPort'] = rule.from_port r['toPort'] = rule.to_port r['ipRanges'] += [{'cidrIp': rule.cidr}] g['ipPermissions'] += [r] return g def _rule_args_to_dict(self, context, kwargs): rules = [] if not 'groups' in kwargs and not 'ip_ranges' in kwargs: rule = self._rule_dict_last_step(context, **kwargs) if rule: rules.append(rule) return rules if 'ip_ranges' in kwargs: rules = self._cidr_args_split(kwargs) else: rules = [kwargs] finalset = [] for rule in rules: if 'groups' in rule: groups_values = self._groups_args_split(rule) for groups_value in groups_values: final = self._rule_dict_last_step(context, **groups_value) finalset.append(final) else: final = self._rule_dict_last_step(context, **rule) finalset.append(final) return finalset def _cidr_args_split(self, kwargs): cidr_args_split = [] cidrs = kwargs['ip_ranges'] for key, cidr in cidrs.iteritems(): mykwargs = kwargs.copy() del mykwargs['ip_ranges'] mykwargs['cidr_ip'] = cidr['cidr_ip'] cidr_args_split.append(mykwargs) return cidr_args_split def _groups_args_split(self, kwargs): groups_args_split = [] groups = kwargs['groups'] for key, group in groups.iteritems(): mykwargs = kwargs.copy() del mykwargs['groups'] if 'group_name' in group: mykwargs['source_security_group_name'] = group['group_name'] if 'user_id' in group: mykwargs['source_security_group_owner_id'] = group['user_id'] if 'group_id' in group: mykwargs['source_security_group_id'] = group['group_id'] groups_args_split.append(mykwargs) return groups_args_split def _rule_dict_last_step(self, context, to_port=None, from_port=None, ip_protocol=None, cidr_ip=None, user_id=None, source_security_group_name=None, source_security_group_owner_id=None): values = {} if source_security_group_name: source_project_id = self._get_source_project_id(context, source_security_group_owner_id) source_security_group = db.security_group_get_by_name( context.elevated(), source_project_id, source_security_group_name) notfound = exception.SecurityGroupNotFound if not source_security_group: raise notfound(security_group_id=source_security_group_name) values['group_id'] = source_security_group['id'] elif cidr_ip: # If this fails, it throws an exception. This is what we want. cidr_ip = urllib.unquote(cidr_ip).decode() if not utils.is_valid_cidr(cidr_ip): # Raise exception for non-valid address raise exception.EC2APIError(_("Invalid CIDR")) values['cidr'] = cidr_ip else: values['cidr'] = '0.0.0.0/0' if source_security_group_name: # Open everything if an explicit port range or type/code are not # specified, but only if a source group was specified. 
ip_proto_upper = ip_protocol.upper() if ip_protocol else '' if (ip_proto_upper == 'ICMP' and from_port is None and to_port is None): from_port = -1 to_port = -1 elif (ip_proto_upper in ['TCP', 'UDP'] and from_port is None and to_port is None): from_port = 1 to_port = 65535 if ip_protocol and from_port is not None and to_port is not None: ip_protocol = str(ip_protocol) try: # Verify integer conversions from_port = int(from_port) to_port = int(to_port) except ValueError: if ip_protocol.upper() == 'ICMP': raise exception.InvalidInput(reason="Type and" " Code must be integers for ICMP protocol type") else: raise exception.InvalidInput(reason="To and From ports " "must be integers") if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']: raise exception.InvalidIpProtocol(protocol=ip_protocol) # Verify that from_port is always less than # or equal to to_port if (ip_protocol.upper() in ['TCP', 'UDP'] and (from_port > to_port)): raise exception.InvalidPortRange(from_port=from_port, to_port=to_port, msg="Former value cannot" " be greater than the latter") # Verify valid TCP, UDP port ranges if (ip_protocol.upper() in ['TCP', 'UDP'] and (from_port < 1 or to_port > 65535)): raise exception.InvalidPortRange(from_port=from_port, to_port=to_port, msg="Valid TCP ports should" " be between 1-65535") # Verify ICMP type and code if (ip_protocol.upper() == "ICMP" and (from_port < -1 or from_port > 255 or to_port < -1 or to_port > 255)): raise exception.InvalidPortRange(from_port=from_port, to_port=to_port, msg="For ICMP, the" " type:code must be valid") values['protocol'] = ip_protocol values['from_port'] = from_port values['to_port'] = to_port else: # If cidr based filtering, protocol and ports are mandatory if 'cidr' in values: return None return values def _security_group_rule_exists(self, security_group, values): """Indicates whether the specified rule values are already defined in the given security group.
""" for rule in security_group.rules: is_duplicate = True keys = ('group_id', 'cidr', 'from_port', 'to_port', 'protocol') for key in keys: if rule.get(key) != values.get(key): is_duplicate = False break if is_duplicate: return rule['id'] return False def revoke_security_group_ingress(self, context, group_name=None, group_id=None, **kwargs): if not group_name and not group_id: err = _("Not enough parameters, need group_name or group_id") raise exception.EC2APIError(err) self.compute_api.ensure_default_security_group(context) notfound = exception.SecurityGroupNotFound if group_name: security_group = db.security_group_get_by_name(context, context.project_id, group_name) if not security_group: raise notfound(security_group_id=group_name) if group_id: security_group = db.security_group_get(context, group_id) if not security_group: raise notfound(security_group_id=group_id) msg = _("Revoke security group ingress %s") LOG.audit(msg, security_group['name'], context=context) prevalues = [] try: prevalues = kwargs['ip_permissions'] except KeyError: prevalues.append(kwargs) rule_id = None rule_ids = [] for values in prevalues: rulesvalues = self._rule_args_to_dict(context, values) if not rulesvalues: err = _("%s Not enough parameters to build a valid rule") raise exception.EC2APIError(err % rulesvalues) for values_for_rule in rulesvalues: values_for_rule['parent_group_id'] = security_group.id rule_id = self._security_group_rule_exists(security_group, values_for_rule) if rule_id: db.security_group_rule_destroy(context, rule_id) rule_ids.append(rule_id) if rule_id: # NOTE(vish): we removed a rule, so refresh self.compute_api.trigger_security_group_rules_refresh( context, security_group_id=security_group['id']) self.sgh.trigger_security_group_rule_destroy_refresh( context, rule_ids) return True raise exception.EC2APIError(_("No rule for the specified parameters.")) # TODO(soren): This has only been tested with Boto as the client. # Unfortunately, it seems Boto is using an old API # for these operations, so support for newer API versions # is sketchy. 
def authorize_security_group_ingress(self, context, group_name=None, group_id=None, **kwargs): if not group_name and not group_id: err = _("Not enough parameters, need group_name or group_id") raise exception.EC2APIError(err) self.compute_api.ensure_default_security_group(context) notfound = exception.SecurityGroupNotFound if group_name: security_group = db.security_group_get_by_name(context, context.project_id, group_name) if not security_group: raise notfound(security_group_id=group_name) if group_id: security_group = db.security_group_get(context, group_id) if not security_group: raise notfound(security_group_id=group_id) msg = _("Authorize security group ingress %s") LOG.audit(msg, security_group['name'], context=context) prevalues = [] try: prevalues = kwargs['ip_permissions'] except KeyError: prevalues.append(kwargs) postvalues = [] for values in prevalues: rulesvalues = self._rule_args_to_dict(context, values) if not rulesvalues: err = _("%s Not enough parameters to build a valid rule") raise exception.EC2APIError(err % rulesvalues) for values_for_rule in rulesvalues: values_for_rule['parent_group_id'] = security_group.id if self._security_group_rule_exists(security_group, values_for_rule): err = _('%s - This rule already exists in group') raise exception.EC2APIError(err % values_for_rule) postvalues.append(values_for_rule) allowed = quota.allowed_security_group_rules(context, security_group['id'], 1) if allowed < 1: msg = _("Quota exceeded, too many security group rules.") raise exception.EC2APIError(msg) rule_ids = [] for values_for_rule in postvalues: security_group_rule = db.security_group_rule_create( context, values_for_rule) rule_ids.append(security_group_rule['id']) if postvalues: self.compute_api.trigger_security_group_rules_refresh( context, security_group_id=security_group['id']) self.sgh.trigger_security_group_rule_create_refresh( context, rule_ids) return True raise exception.EC2APIError(_("No rule for the specified parameters.")) def _get_source_project_id(self, context, source_security_group_owner_id): if source_security_group_owner_id: # Parse user:project for source group. source_parts = source_security_group_owner_id.split(':') # If no project name specified, assume it's same as user name. # Since we're looking up by project name, the user name is not # used here. It's only read for EC2 API compatibility. if len(source_parts) == 2: source_project_id = source_parts[1] else: source_project_id = source_parts[0] else: source_project_id = context.project_id return source_project_id def create_security_group(self, context, group_name, group_description): if not re.match('^[a-zA-Z0-9_\- ]+$', str(group_name)): # Some validation to ensure that values match API spec. # - Alphanumeric characters, spaces, dashes, and underscores. # TODO(Daviey): LP: #813685 extend beyond group_name checking, and # probably create a param validator that can be used elsewhere. err = _("Value (%s) for parameter GroupName is invalid." " Content limited to Alphanumeric characters, " "spaces, dashes, and underscores.") % group_name # err not that of master ec2 implementation, as they fail to raise. raise exception.InvalidParameterValue(err=err) if len(str(group_name)) > 255: err = _("Value (%s) for parameter GroupName is invalid." 
" Length exceeds maximum of 255.") % group_name raise exception.InvalidParameterValue(err=err) LOG.audit(_("Create Security Group %s"), group_name, context=context) self.compute_api.ensure_default_security_group(context) if db.security_group_exists(context, context.project_id, group_name): msg = _('group %s already exists') raise exception.EC2APIError(msg % group_name) if quota.allowed_security_groups(context, 1) < 1: msg = _("Quota exceeded, too many security groups.") raise exception.EC2APIError(msg) group = {'user_id': context.user_id, 'project_id': context.project_id, 'name': group_name, 'description': group_description} group_ref = db.security_group_create(context, group) self.sgh.trigger_security_group_create_refresh(context, group) return {'securityGroupSet': [self._format_security_group(context, group_ref)]} def delete_security_group(self, context, group_name=None, group_id=None, **kwargs): if not group_name and not group_id: err = _("Not enough parameters, need group_name or group_id") raise exception.EC2APIError(err) notfound = exception.SecurityGroupNotFound if group_name: security_group = db.security_group_get_by_name(context, context.project_id, group_name) if not security_group: raise notfound(security_group_id=group_name) elif group_id: security_group = db.security_group_get(context, group_id) if not security_group: raise notfound(security_group_id=group_id) if db.security_group_in_use(context, security_group.id): raise exception.InvalidGroup(reason="In Use") LOG.audit(_("Delete security group %s"), group_name, context=context) db.security_group_destroy(context, security_group.id) self.sgh.trigger_security_group_destroy_refresh(context, security_group.id) return True def get_console_output(self, context, instance_id, **kwargs): LOG.audit(_("Get console output for instance %s"), instance_id, context=context) # instance_id may be passed in as a list of instances if isinstance(instance_id, list): ec2_id = instance_id[0] else: ec2_id = instance_id validate_ec2_id(ec2_id) instance_id = ec2utils.ec2_id_to_id(ec2_id) instance = self.compute_api.get(context, instance_id) output = self.compute_api.get_console_output(context, instance) now = utils.utcnow() return {"InstanceId": ec2_id, "Timestamp": now, "output": base64.b64encode(output)} def describe_volumes(self, context, volume_id=None, **kwargs): if volume_id: volumes = [] for ec2_id in volume_id: validate_ec2_id(ec2_id) internal_id = ec2utils.ec2_id_to_id(ec2_id) volume = self.volume_api.get(context, internal_id) volumes.append(volume) else: volumes = self.volume_api.get_all(context) volumes = [self._format_volume(context, v) for v in volumes] return {'volumeSet': volumes} def _format_volume(self, context, volume): instance_ec2_id = None instance_data = None if volume.get('instance', None): instance_id = volume['instance']['id'] instance_ec2_id = ec2utils.id_to_ec2_id(instance_id) instance_data = '%s[%s]' % (instance_ec2_id, volume['instance']['host']) v = {} v['volumeId'] = ec2utils.id_to_ec2_vol_id(volume['id']) v['status'] = volume['status'] v['size'] = volume['size'] v['availabilityZone'] = volume['availability_zone'] v['createTime'] = volume['created_at'] if context.is_admin: v['status'] = '%s (%s, %s, %s, %s)' % ( volume['status'], volume['project_id'], volume['host'], instance_data, volume['mountpoint']) if volume['attach_status'] == 'attached': v['attachmentSet'] = [{'attachTime': volume['attach_time'], 'deleteOnTermination': False, 'device': volume['mountpoint'], 'instanceId': instance_ec2_id, 'status': 'attached', 
'volumeId': v['volumeId']}] else: v['attachmentSet'] = [{}] if volume.get('snapshot_id') is not None: v['snapshotId'] = ec2utils.id_to_ec2_snap_id(volume['snapshot_id']) else: v['snapshotId'] = None return v def create_volume(self, context, **kwargs): size = kwargs.get('size') if kwargs.get('snapshot_id') is not None: snapshot_id = ec2utils.ec2_id_to_id(kwargs['snapshot_id']) snapshot = self.volume_api.get_snapshot(context, snapshot_id) LOG.audit(_("Create volume from snapshot %s"), snapshot_id, context=context) else: snapshot = None LOG.audit(_("Create volume of %s GB"), size, context=context) availability_zone = kwargs.get('availability_zone', None) volume = self.volume_api.create(context, size, None, None, snapshot, availability_zone=availability_zone) # TODO(vish): Instance should be None at db layer instead of # trying to lazy load, but for now we turn it into # a dict to avoid an error. return self._format_volume(context, dict(volume)) def delete_volume(self, context, volume_id, **kwargs): validate_ec2_id(volume_id) volume_id = ec2utils.ec2_id_to_id(volume_id) try: volume = self.volume_api.get(context, volume_id) self.volume_api.delete(context, volume) except exception.InvalidVolume: raise exception.EC2APIError(_('Delete Failed')) return True def attach_volume(self, context, volume_id, instance_id, device, **kwargs): validate_ec2_id(instance_id) validate_ec2_id(volume_id) volume_id = ec2utils.ec2_id_to_id(volume_id) instance_id = ec2utils.ec2_id_to_id(instance_id) instance = self.compute_api.get(context, instance_id) msg = _("Attach volume %(volume_id)s to instance %(instance_id)s" " at %(device)s") % locals() LOG.audit(msg, context=context) try: self.compute_api.attach_volume(context, instance, volume_id, device) except exception.InvalidVolume: raise exception.EC2APIError(_('Attach Failed.')) volume = self.volume_api.get(context, volume_id) return {'attachTime': volume['attach_time'], 'device': volume['mountpoint'], 'instanceId': ec2utils.id_to_ec2_id(instance_id), 'requestId': context.request_id, 'status': volume['attach_status'], 'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)} def detach_volume(self, context, volume_id, **kwargs): validate_ec2_id(volume_id) volume_id = ec2utils.ec2_id_to_id(volume_id) LOG.audit(_("Detach volume %s"), volume_id, context=context) volume = self.volume_api.get(context, volume_id) try: instance = self.compute_api.detach_volume(context, volume_id=volume_id) except exception.InvalidVolume: raise exception.EC2APIError(_('Detach Volume Failed.')) return {'attachTime': volume['attach_time'], 'device': volume['mountpoint'], 'instanceId': ec2utils.id_to_ec2_id(instance['id']), 'requestId': context.request_id, 'status': volume['attach_status'], 'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)} def _format_kernel_id(self, context, instance_ref, result, key): kernel_uuid = instance_ref['kernel_id'] if kernel_uuid is None or kernel_uuid == '': return result[key] = ec2utils.glance_id_to_ec2_id(context, kernel_uuid, 'aki') def _format_ramdisk_id(self, context, instance_ref, result, key): ramdisk_uuid = instance_ref['ramdisk_id'] if ramdisk_uuid is None or ramdisk_uuid == '': return result[key] = ec2utils.glance_id_to_ec2_id(context, ramdisk_uuid, 'ari') def describe_instance_attribute(self, context, instance_id, attribute, **kwargs): def _unsupported_attribute(instance, result): raise exception.EC2APIError(_('attribute not supported: %s') % attribute) def _format_attr_block_device_mapping(instance, result): tmp = {} 
self._format_instance_root_device_name(instance, tmp) self._format_instance_bdm(context, instance_id, tmp['rootDeviceName'], result) def _format_attr_disable_api_termination(instance, result): result['disableApiTermination'] = instance['disable_terminate'] def _format_attr_group_set(instance, result): CloudController._format_group_set(instance, result) def _format_attr_instance_initiated_shutdown_behavior(instance, result): if instance['shutdown_terminate']: result['instanceInitiatedShutdownBehavior'] = 'terminate' else: result['instanceInitiatedShutdownBehavior'] = 'stop' def _format_attr_instance_type(instance, result): self._format_instance_type(instance, result) def _format_attr_kernel(instance, result): self._format_kernel_id(context, instance, result, 'kernel') def _format_attr_ramdisk(instance, result): self._format_ramdisk_id(context, instance, result, 'ramdisk') def _format_attr_root_device_name(instance, result): self._format_instance_root_device_name(instance, result) def _format_attr_source_dest_check(instance, result): _unsupported_attribute(instance, result) def _format_attr_user_data(instance, result): result['userData'] = base64.b64decode(instance['user_data']) attribute_formatter = { 'blockDeviceMapping': _format_attr_block_device_mapping, 'disableApiTermination': _format_attr_disable_api_termination, 'groupSet': _format_attr_group_set, 'instanceInitiatedShutdownBehavior': _format_attr_instance_initiated_shutdown_behavior, 'instanceType': _format_attr_instance_type, 'kernel': _format_attr_kernel, 'ramdisk': _format_attr_ramdisk, 'rootDeviceName': _format_attr_root_device_name, 'sourceDestCheck': _format_attr_source_dest_check, 'userData': _format_attr_user_data, } fn = attribute_formatter.get(attribute) if fn is None: raise exception.EC2APIError( _('attribute not supported: %s') % attribute) ec2_instance_id = instance_id validate_ec2_id(instance_id) instance_id = ec2utils.ec2_id_to_id(ec2_instance_id) instance = self.compute_api.get(context, instance_id) result = {'instance_id': ec2_instance_id} fn(instance, result) return result def describe_instances(self, context, **kwargs): # Optional DescribeInstances argument instance_id = kwargs.get('instance_id', None) return self._format_describe_instances(context, instance_id=instance_id) def describe_instances_v6(self, context, **kwargs): # Optional DescribeInstancesV6 argument instance_id = kwargs.get('instance_id', None) return self._format_describe_instances(context, instance_id=instance_id, use_v6=True) def _format_describe_instances(self, context, **kwargs): return {'reservationSet': self._format_instances(context, **kwargs)} def _format_run_instances(self, context, reservation_id): i = self._format_instances(context, reservation_id=reservation_id) assert len(i) == 1 return i[0] def _format_terminate_instances(self, context, instance_id, previous_states): instances_set = [] for (ec2_id, previous_state) in zip(instance_id, previous_states): i = {} i['instanceId'] = ec2_id i['previousState'] = _state_description(previous_state['vm_state'], previous_state['shutdown_terminate']) try: internal_id = ec2utils.ec2_id_to_id(ec2_id) instance = self.compute_api.get(context, internal_id) i['shutdownState'] = _state_description(instance['vm_state'], instance['shutdown_terminate']) except exception.NotFound: i['shutdownState'] = _state_description(vm_states.DELETED, True) instances_set.append(i) return {'instancesSet': instances_set} def _format_instance_bdm(self, context, instance_id, root_device_name, result): """Format 
InstanceBlockDeviceMappingResponseItemType""" root_device_type = 'instance-store' mapping = [] for bdm in db.block_device_mapping_get_all_by_instance(context, instance_id): volume_id = bdm['volume_id'] if (volume_id is None or bdm['no_device']): continue if (bdm['device_name'] == root_device_name and (bdm['snapshot_id'] or bdm['volume_id'])): assert not bdm['virtual_name'] root_device_type = 'ebs' vol = self.volume_api.get(context, volume_id) LOG.debug(_("vol = %s\n"), vol) # TODO(yamahata): volume attach time ebs = {'volumeId': volume_id, 'deleteOnTermination': bdm['delete_on_termination'], 'attachTime': vol['attach_time'] or '-', 'status': vol['status'], } res = {'deviceName': bdm['device_name'], 'ebs': ebs, } mapping.append(res) if mapping: result['blockDeviceMapping'] = mapping result['rootDeviceType'] = root_device_type @staticmethod def _format_instance_root_device_name(instance, result): result['rootDeviceName'] = (instance.get('root_device_name') or block_device.DEFAULT_ROOT_DEV_NAME) @staticmethod def _format_instance_type(instance, result): if instance['instance_type']: result['instanceType'] = instance['instance_type'].get('name') else: result['instanceType'] = None @staticmethod def _format_group_set(instance, result): security_group_names = [] if instance.get('security_groups'): for security_group in instance['security_groups']: security_group_names.append(security_group['name']) result['groupSet'] = utils.convert_to_list_dict( security_group_names, 'groupId') def _format_instances(self, context, instance_id=None, use_v6=False, **search_opts): # TODO(termie): this method is poorly named as its name does not imply # that it will be making a variety of database calls # rather than simply formatting a bunch of instances that # were handed to it reservations = {} # NOTE(vish): instance_id is an optional list of ids to filter by if instance_id: instances = [] for ec2_id in instance_id: internal_id = ec2utils.ec2_id_to_id(ec2_id) try: instance = self.compute_api.get(context, internal_id) except exception.NotFound: continue instances.append(instance) else: try: # always filter out deleted instances search_opts['deleted'] = False instances = self.compute_api.get_all(context, search_opts=search_opts, sort_dir='asc') except exception.NotFound: instances = [] for instance in instances: if not context.is_admin: if instance['image_ref'] == str(FLAGS.vpn_image_id): continue i = {} instance_id = instance['id'] ec2_id = ec2utils.id_to_ec2_id(instance_id) i['instanceId'] = ec2_id image_uuid = instance['image_ref'] i['imageId'] = ec2utils.glance_id_to_ec2_id(context, image_uuid) self._format_kernel_id(context, instance, i, 'kernelId') self._format_ramdisk_id(context, instance, i, 'ramdiskId') i['instanceState'] = _state_description( instance['vm_state'], instance['shutdown_terminate']) fixed_ip = None floating_ip = None ip_info = ec2utils.get_ip_info_for_instance(context, instance) if ip_info['fixed_ips']: fixed_ip = ip_info['fixed_ips'][0] if ip_info['floating_ips']: floating_ip = ip_info['floating_ips'][0] if ip_info['fixed_ip6s']: i['dnsNameV6'] = ip_info['fixed_ip6s'][0] if FLAGS.ec2_private_dns_show_ip: i['privateDnsName'] = fixed_ip else: i['privateDnsName'] = instance['hostname'] i['privateIpAddress'] = fixed_ip i['publicDnsName'] = floating_ip i['ipAddress'] = floating_ip or fixed_ip i['dnsName'] = i['publicDnsName'] or i['privateDnsName'] i['keyName'] = instance['key_name'] if context.is_admin: i['keyName'] = '%s (%s, %s)' % (i['keyName'], instance['project_id'], instance['host']) 
i['productCodesSet'] = utils.convert_to_list_dict([], 'product_codes') self._format_instance_type(instance, i) i['launchTime'] = instance['created_at'] i['amiLaunchIndex'] = instance['launch_index'] self._format_instance_root_device_name(instance, i) self._format_instance_bdm(context, instance_id, i['rootDeviceName'], i) host = instance['host'] services = db.service_get_all_by_host(context.elevated(), host) zone = ec2utils.get_availability_zone_by_host(services, host) i['placement'] = {'availabilityZone': zone} if instance['reservation_id'] not in reservations: r = {} r['reservationId'] = instance['reservation_id'] r['ownerId'] = instance['project_id'] self._format_group_set(instance, r) r['instancesSet'] = [] reservations[instance['reservation_id']] = r reservations[instance['reservation_id']]['instancesSet'].append(i) return list(reservations.values()) def describe_addresses(self, context, **kwargs): return self.format_addresses(context) def format_addresses(self, context): addresses = [] floaters = self.network_api.get_floating_ips_by_project(context) for floating_ip_ref in floaters: if floating_ip_ref['project_id'] is None: continue address = floating_ip_ref['address'] ec2_id = None if floating_ip_ref['fixed_ip_id']: fixed_id = floating_ip_ref['fixed_ip_id'] fixed = self.network_api.get_fixed_ip(context, fixed_id) if fixed['instance_id'] is not None: ec2_id = ec2utils.id_to_ec2_id(fixed['instance_id']) address_rv = {'public_ip': address, 'instance_id': ec2_id} if context.is_admin: details = "%s (%s)" % (address_rv['instance_id'], floating_ip_ref['project_id']) address_rv['instance_id'] = details addresses.append(address_rv) return {'addressesSet': addresses} def allocate_address(self, context, **kwargs): LOG.audit(_("Allocate address"), context=context) try: public_ip = self.network_api.allocate_floating_ip(context) return {'publicIp': public_ip} except rpc_common.RemoteError as ex: # NOTE(tr3buchet) - why does this block exist? 
if ex.exc_type == 'NoMoreFloatingIps': raise exception.NoMoreFloatingIps() else: raise def release_address(self, context, public_ip, **kwargs): LOG.audit(_("Release address %s"), public_ip, context=context) self.network_api.release_floating_ip(context, address=public_ip) return {'return': "true"} def associate_address(self, context, instance_id, public_ip, **kwargs): LOG.audit(_("Associate address %(public_ip)s to" " instance %(instance_id)s") % locals(), context=context) instance_id = ec2utils.ec2_id_to_id(instance_id) instance = self.compute_api.get(context, instance_id) self.compute_api.associate_floating_ip(context, instance, address=public_ip) return {'return': "true"} def disassociate_address(self, context, public_ip, **kwargs): LOG.audit(_("Disassociate address %s"), public_ip, context=context) self.network_api.disassociate_floating_ip(context, address=public_ip) return {'return': "true"} def run_instances(self, context, **kwargs): max_count = int(kwargs.get('max_count', 1)) if kwargs.get('kernel_id'): kernel = self._get_image(context, kwargs['kernel_id']) kwargs['kernel_id'] = ec2utils.id_to_glance_id(context, kernel['id']) if kwargs.get('ramdisk_id'): ramdisk = self._get_image(context, kwargs['ramdisk_id']) kwargs['ramdisk_id'] = ec2utils.id_to_glance_id(context, ramdisk['id']) for bdm in kwargs.get('block_device_mapping', []): _parse_block_device_mapping(bdm) image = self._get_image(context, kwargs['image_id']) image_uuid = ec2utils.id_to_glance_id(context, image['id']) if image: image_state = self._get_image_state(image) else: raise exception.ImageNotFound(image_id=kwargs['image_id']) if image_state != 'available': raise exception.EC2APIError(_('Image must be available')) (instances, resv_id) = self.compute_api.create(context, instance_type=instance_types.get_instance_type_by_name( kwargs.get('instance_type', None)), image_href=image_uuid, min_count=int(kwargs.get('min_count', max_count)), max_count=max_count, kernel_id=kwargs.get('kernel_id'), ramdisk_id=kwargs.get('ramdisk_id'), key_name=kwargs.get('key_name'), user_data=kwargs.get('user_data'), security_group=kwargs.get('security_group'), availability_zone=kwargs.get('placement', {}).get( 'availability_zone'), block_device_mapping=kwargs.get('block_device_mapping', {})) return self._format_run_instances(context, resv_id) def terminate_instances(self, context, instance_id, **kwargs): """Terminate each instance in instance_id, which is a list of ec2 ids. instance_id is a kwarg so its name cannot be modified.""" LOG.debug(_("Going to start terminating instances")) previous_states = [] for ec2_id in instance_id: validate_ec2_id(ec2_id) _instance_id = ec2utils.ec2_id_to_id(ec2_id) instance = self.compute_api.get(context, _instance_id) previous_states.append(instance) self.compute_api.delete(context, instance) return self._format_terminate_instances(context, instance_id, previous_states) def reboot_instances(self, context, instance_id, **kwargs): """instance_id is a list of instance ids""" LOG.audit(_("Reboot instance %r"), instance_id, context=context) for ec2_id in instance_id: validate_ec2_id(ec2_id) _instance_id = ec2utils.ec2_id_to_id(ec2_id) instance = self.compute_api.get(context, _instance_id) self.compute_api.reboot(context, instance, 'HARD') return True def stop_instances(self, context, instance_id, **kwargs): """Stop each instance in instance_id.
Here instance_id is a list of instance ids""" LOG.debug(_("Going to stop instances")) for ec2_id in instance_id: validate_ec2_id(ec2_id) _instance_id = ec2utils.ec2_id_to_id(ec2_id) instance = self.compute_api.get(context, _instance_id) self.compute_api.stop(context, instance) return True def start_instances(self, context, instance_id, **kwargs): """Start each instance in instance_id. Here instance_id is a list of instance ids""" LOG.debug(_("Going to start instances")) for ec2_id in instance_id: validate_ec2_id(ec2_id) _instance_id = ec2utils.ec2_id_to_id(ec2_id) instance = self.compute_api.get(context, _instance_id) self.compute_api.start(context, instance) return True def _get_image(self, context, ec2_id): try: internal_id = ec2utils.ec2_id_to_id(ec2_id) image = self.image_service.show(context, internal_id) except (exception.InvalidEc2Id, exception.ImageNotFound): try: return self.image_service.show_by_name(context, ec2_id) except exception.NotFound: raise exception.ImageNotFound(image_id=ec2_id) image_type = ec2_id.split('-')[0] if ec2utils.image_type(image.get('container_format')) != image_type: raise exception.ImageNotFound(image_id=ec2_id) return image def _format_image(self, image): """Convert from format defined by GlanceImageService to S3 format.""" i = {} image_type = ec2utils.image_type(image.get('container_format')) ec2_id = ec2utils.image_ec2_id(image.get('id'), image_type) name = image.get('name') i['imageId'] = ec2_id kernel_id = image['properties'].get('kernel_id') if kernel_id: i['kernelId'] = ec2utils.image_ec2_id(kernel_id, 'aki') ramdisk_id = image['properties'].get('ramdisk_id') if ramdisk_id: i['ramdiskId'] = ec2utils.image_ec2_id(ramdisk_id, 'ari') if FLAGS.auth_strategy == 'deprecated': i['imageOwnerId'] = image['properties'].get('project_id') else: i['imageOwnerId'] = image.get('owner') img_loc = image['properties'].get('image_location') if img_loc: i['imageLocation'] = img_loc else: i['imageLocation'] = "%s (%s)" % (img_loc, name) i['name'] = name if not name and img_loc: # This should only occur for images registered with ec2 api # prior to that api populating the glance name i['name'] = img_loc i['imageState'] = self._get_image_state(image) i['description'] = image.get('description') display_mapping = {'aki': 'kernel', 'ari': 'ramdisk', 'ami': 'machine'} i['imageType'] = display_mapping.get(image_type) i['isPublic'] = bool(image.get('is_public')) i['architecture'] = image['properties'].get('architecture') properties = image['properties'] root_device_name = block_device.properties_root_device_name(properties) root_device_type = 'instance-store' for bdm in properties.get('block_device_mapping', []): if (bdm.get('device_name') == root_device_name and ('snapshot_id' in bdm or 'volume_id' in bdm) and not bdm.get('no_device')): root_device_type = 'ebs' i['rootDeviceName'] = (root_device_name or block_device.DEFAULT_ROOT_DEV_NAME) i['rootDeviceType'] = root_device_type _format_mappings(properties, i) return i def describe_images(self, context, image_id=None, **kwargs): # NOTE: image_id is a list!
if image_id: images = [] for ec2_id in image_id: try: image = self._get_image(context, ec2_id) except exception.NotFound: raise exception.ImageNotFound(image_id=ec2_id) images.append(image) else: images = self.image_service.detail(context) images = [self._format_image(i) for i in images] return {'imagesSet': images} def deregister_image(self, context, image_id, **kwargs): LOG.audit(_("De-registering image %s"), image_id, context=context) image = self._get_image(context, image_id) internal_id = image['id'] self.image_service.delete(context, internal_id) return {'imageId': image_id} def _register_image(self, context, metadata): image = self.image_service.create(context, metadata) image_type = ec2utils.image_type(image.get('container_format')) image_id = ec2utils.image_ec2_id(image['id'], image_type) return image_id def register_image(self, context, image_location=None, **kwargs): if image_location is None and kwargs.get('name'): image_location = kwargs['name'] if image_location is None: raise exception.EC2APIError(_('imageLocation is required')) metadata = {'properties': {'image_location': image_location}} if kwargs.get('name'): metadata['name'] = kwargs['name'] else: metadata['name'] = image_location if 'root_device_name' in kwargs: metadata['properties']['root_device_name'] = kwargs.get( 'root_device_name') mappings = [_parse_block_device_mapping(bdm) for bdm in kwargs.get('block_device_mapping', [])] if mappings: metadata['properties']['block_device_mapping'] = mappings image_id = self._register_image(context, metadata) msg = _("Registered image %(image_location)s with" " id %(image_id)s") % locals() LOG.audit(msg, context=context) return {'imageId': image_id} def describe_image_attribute(self, context, image_id, attribute, **kwargs): def _block_device_mapping_attribute(image, result): _format_mappings(image['properties'], result) def _launch_permission_attribute(image, result): result['launchPermission'] = [] if image['is_public']: result['launchPermission'].append({'group': 'all'}) def _root_device_name_attribute(image, result): _prop_root_dev_name = block_device.properties_root_device_name result['rootDeviceName'] = _prop_root_dev_name(image['properties']) if result['rootDeviceName'] is None: result['rootDeviceName'] = block_device.DEFAULT_ROOT_DEV_NAME supported_attributes = { 'blockDeviceMapping': _block_device_mapping_attribute, 'launchPermission': _launch_permission_attribute, 'rootDeviceName': _root_device_name_attribute, } fn = supported_attributes.get(attribute) if fn is None: raise exception.EC2APIError(_('attribute not supported: %s') % attribute) try: image = self._get_image(context, image_id) except exception.NotFound: raise exception.ImageNotFound(image_id=image_id) result = {'imageId': image_id} fn(image, result) return result def modify_image_attribute(self, context, image_id, attribute, operation_type, **kwargs): # TODO(devcamcar): Support users and groups other than 'all'. 
if attribute != 'launchPermission': raise exception.EC2APIError(_('attribute not supported: %s') % attribute) if 'user_group' not in kwargs: raise exception.EC2APIError(_('user or group not specified')) if len(kwargs['user_group']) != 1 and kwargs['user_group'][0] != 'all': raise exception.EC2APIError(_('only group "all" is supported')) if operation_type not in ['add', 'remove']: msg = _('operation_type must be add or remove') raise exception.EC2APIError(msg) LOG.audit(_("Updating image %s publicity"), image_id, context=context) try: image = self._get_image(context, image_id) except exception.NotFound: raise exception.ImageNotFound(image_id=image_id) internal_id = image['id'] del image['id'] image['is_public'] = (operation_type == 'add') try: return self.image_service.update(context, internal_id, image) except exception.ImageNotAuthorized: msg = _('Not allowed to modify attributes for image %s') raise exception.EC2APIError(msg % image_id) def update_image(self, context, image_id, **kwargs): internal_id = ec2utils.ec2_id_to_id(image_id) result = self.image_service.update(context, internal_id, dict(kwargs)) return result # TODO(yamahata): race condition # At the moment there is no way to prevent others from # manipulating instances/volumes/snapshots. # As other code doesn't take it into consideration, here we don't # care about it for now. Ostrich algorithm def create_image(self, context, instance_id, **kwargs): # NOTE(yamahata): name/description are ignored by register_image(), # do so here no_reboot = kwargs.get('no_reboot', False) validate_ec2_id(instance_id) ec2_instance_id = instance_id instance_id = ec2utils.ec2_id_to_id(ec2_instance_id) instance = self.compute_api.get(context, instance_id) # stop the instance if necessary restart_instance = False if not no_reboot: vm_state = instance['vm_state'] # if the instance is in a transitional state, refuse to proceed. if vm_state not in (vm_states.ACTIVE, vm_states.SHUTOFF, vm_states.STOPPED): raise exception.InstanceNotRunning(instance_id=ec2_instance_id) if vm_state in (vm_states.ACTIVE, vm_states.SHUTOFF): restart_instance = True self.compute_api.stop(context, instance) # wait for the instance to be fully stopped start_time = time.time() while vm_state != vm_states.STOPPED: time.sleep(1) instance = self.compute_api.get(context, instance_id) vm_state = instance['vm_state'] # NOTE(yamahata): timeout and error. 1 hour for now for safety. # Is it too short/long? # Or is there any better way? timeout = 1 * 60 * 60 if time.time() > start_time + timeout: raise exception.EC2APIError( _('Couldn\'t stop instance within %d sec') % timeout) src_image = self._get_image(context, instance['image_ref']) properties = src_image['properties'] if instance['root_device_name']: properties['root_device_name'] = instance['root_device_name'] mapping = [] bdms = db.block_device_mapping_get_all_by_instance(context, instance_id) for bdm in bdms: if bdm.no_device: continue m = {} for attr in ('device_name', 'snapshot_id', 'volume_id', 'volume_size', 'delete_on_termination', 'no_device', 'virtual_name'): val = getattr(bdm, attr) if val is not None: m[attr] = val volume_id = m.get('volume_id') if m.get('snapshot_id') and volume_id: # create snapshot based on volume_id volume = self.volume_api.get(context, volume_id) # NOTE(yamahata): Should we wait for snapshot creation? # Linux LVM snapshot creation completes in # short time, it doesn't matter for now.
                snapshot = self.volume_api.create_snapshot_force(
                    context, volume, volume['display_name'],
                    volume['display_description'])
                m['snapshot_id'] = snapshot['id']
                del m['volume_id']
            if m:
                mapping.append(m)

        for m in _properties_get_mappings(properties):
            virtual_name = m['virtual']
            if virtual_name in ('ami', 'root'):
                continue

            assert block_device.is_swap_or_ephemeral(virtual_name)
            device_name = m['device']
            if device_name in [b['device_name'] for b in mapping
                               if not b.get('no_device', False)]:
                continue

            # NOTE(yamahata): swap and ephemeral devices are specified in
            #                 the AMI, but disabled for this instance by the
            #                 user. So disable those devices by setting
            #                 no_device.
            mapping.append({'device_name': device_name, 'no_device': True})

        if mapping:
            properties['block_device_mapping'] = mapping

        for attr in ('status', 'location', 'id'):
            src_image.pop(attr, None)

        image_id = self._register_image(context, src_image)

        if restart_instance:
            self.compute_api.start(context, instance_id=instance_id)

        return {'imageId': image_id}
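# NOTE: the following is a minimal, self-contained sketch (not Nova code) of
# the mapping-merge step in create_image() above: instance block device
# mappings win, and swap/ephemeral devices that the AMI defines but the
# instance does not use are disabled with no_device. The helper name and the
# plain-dict inputs are hypothetical, for illustration only.
def merge_block_device_mappings(image_mappings, instance_mappings):
    """Combine AMI-defined swap/ephemeral mappings with instance BDMs."""
    # Devices already claimed by the instance (and not explicitly disabled).
    used = set(m['device_name'] for m in instance_mappings
               if not m.get('no_device', False))
    result = list(instance_mappings)
    for m in image_mappings:
        if m['virtual'] in ('ami', 'root'):
            continue
        if m['device'] in used:
            continue
        # Defined by the AMI but unused by this instance: disable it.
        result.append({'device_name': m['device'], 'no_device': True})
    return result

# Example (hypothetical data):
#   merge_block_device_mappings(
#       [{'virtual': 'ephemeral0', 'device': '/dev/sdb'}],
#       [{'device_name': '/dev/sdc', 'volume_id': 'vol-1'}])
#   => [{'device_name': '/dev/sdc', 'volume_id': 'vol-1'},
#       {'device_name': '/dev/sdb', 'no_device': True}]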
./CrossVul/dataset_final_sorted/CWE-264/py/good_3634_0
crossvul-python_data_bad_5538_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Piston Cloud Computing, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Handles all requests relating to compute resources (e.g. guest vms, networking and storage of vms, and compute hosts on which they run).""" import functools import re import time import novaclient import webob.exc from nova import block_device from nova.compute import aggregate_states from nova.compute import instance_types from nova.compute import power_state from nova.compute import task_states from nova.compute import vm_states from nova.db import base from nova import exception from nova import flags import nova.image from nova import log as logging from nova import network from nova.openstack.common import cfg import nova.policy from nova import quota from nova import rpc from nova.scheduler import api as scheduler_api from nova import utils from nova import volume LOG = logging.getLogger(__name__) find_host_timeout_opt = cfg.StrOpt('find_host_timeout', default=30, help='Timeout after NN seconds when looking for a host.') FLAGS = flags.FLAGS FLAGS.register_opt(find_host_timeout_opt) flags.DECLARE('consoleauth_topic', 'nova.consoleauth') def check_instance_state(vm_state=None, task_state=None): """Decorator to check VM and/or task state before entry to API functions. If the instance is in the wrong state, the wrapper will raise an exception. """ if vm_state is not None and not isinstance(vm_state, set): vm_state = set(vm_state) if task_state is not None and not isinstance(task_state, set): task_state = set(task_state) def outer(f): @functools.wraps(f) def inner(self, context, instance, *args, **kw): if vm_state is not None and instance['vm_state'] not in vm_state: raise exception.InstanceInvalidState( attr='vm_state', instance_uuid=instance['uuid'], state=instance['vm_state'], method=f.__name__) if (task_state is not None and instance['task_state'] not in task_state): raise exception.InstanceInvalidState( attr='task_state', instance_uuid=instance['uuid'], state=instance['task_state'], method=f.__name__) return f(self, context, instance, *args, **kw) return inner return outer def wrap_check_policy(func): """Check corresponding policy prior of wrapped method to execution""" @functools.wraps(func) def wrapped(self, context, target, *args, **kwargs): check_policy(context, func.__name__, target) return func(self, context, target, *args, **kwargs) return wrapped def check_policy(context, action, target): _action = 'compute:%s' % action nova.policy.enforce(context, _action, target) class BaseAPI(base.Base): """Base API class.""" def __init__(self, **kwargs): super(BaseAPI, self).__init__(**kwargs) def _cast_or_call_compute_message(self, rpc_method, compute_method, context, instance=None, host=None, params=None): """Generic handler for RPC casts and calls to compute. 
:param rpc_method: RPC method to use (rpc.call or rpc.cast) :param compute_method: Compute manager method to call :param context: RequestContext of caller :param instance: The instance object to use to find host to send to Can be None to not include instance_uuid in args :param host: Optional host to send to instead of instance['host'] Must be specified if 'instance' is None :param params: Optional dictionary of arguments to be passed to the compute worker :returns: None """ if not params: params = {} if not host: if not instance: raise exception.Error(_("No compute host specified")) host = instance['host'] if not host: raise exception.Error(_("Unable to find host for " "Instance %s") % instance['uuid']) queue = self.db.queue_get_for(context, FLAGS.compute_topic, host) if instance: params['instance_uuid'] = instance['uuid'] kwargs = {'method': compute_method, 'args': params} return rpc_method(context, queue, kwargs) def _cast_compute_message(self, *args, **kwargs): """Generic handler for RPC casts to compute.""" self._cast_or_call_compute_message(rpc.cast, *args, **kwargs) def _call_compute_message(self, *args, **kwargs): """Generic handler for RPC calls to compute.""" return self._cast_or_call_compute_message(rpc.call, *args, **kwargs) @staticmethod def _cast_scheduler_message(context, args): """Generic handler for RPC calls to the scheduler.""" rpc.cast(context, FLAGS.scheduler_topic, args) class API(BaseAPI): """API for interacting with the compute manager.""" def __init__(self, image_service=None, network_api=None, volume_api=None, **kwargs): self.image_service = (image_service or nova.image.get_default_image_service()) self.network_api = network_api or network.API() self.volume_api = volume_api or volume.API() self.sgh = utils.import_object(FLAGS.security_group_handler) super(API, self).__init__(**kwargs) def _check_injected_file_quota(self, context, injected_files): """Enforce quota limits on injected files. Raises a QuotaError if any limit is exceeded. 
""" if injected_files is None: return limit = quota.allowed_injected_files(context, len(injected_files)) if len(injected_files) > limit: raise exception.QuotaError(code="OnsetFileLimitExceeded") path_limit = quota.allowed_injected_file_path_bytes(context) for path, content in injected_files: if len(path) > path_limit: raise exception.QuotaError(code="OnsetFilePathLimitExceeded") content_limit = quota.allowed_injected_file_content_bytes( context, len(content)) if len(content) > content_limit: code = "OnsetFileContentLimitExceeded" raise exception.QuotaError(code=code) def _check_metadata_properties_quota(self, context, metadata=None): """Enforce quota limits on metadata properties.""" if not metadata: metadata = {} num_metadata = len(metadata) quota_metadata = quota.allowed_metadata_items(context, num_metadata) if quota_metadata < num_metadata: pid = context.project_id msg = _("Quota exceeded for %(pid)s, tried to set " "%(num_metadata)s metadata properties") % locals() LOG.warn(msg) raise exception.QuotaError(code="MetadataLimitExceeded") # Because metadata is stored in the DB, we hard-code the size limits # In future, we may support more variable length strings, so we act # as if this is quota-controlled for forwards compatibility for k, v in metadata.iteritems(): if len(k) > 255 or len(v) > 255: pid = context.project_id msg = _("Quota exceeded for %(pid)s, metadata property " "key or value too long") % locals() LOG.warn(msg) raise exception.QuotaError(code="MetadataLimitExceeded") def _check_requested_networks(self, context, requested_networks): """ Check if the networks requested belongs to the project and the fixed IP address for each network provided is within same the network block """ if requested_networks is None: return self.network_api.validate_networks(context, requested_networks) def _create_instance(self, context, instance_type, image_href, kernel_id, ramdisk_id, min_count, max_count, display_name, display_description, key_name, key_data, security_group, availability_zone, user_data, metadata, injected_files, admin_password, access_ip_v4, access_ip_v6, requested_networks, config_drive, block_device_mapping, auto_disk_config, reservation_id=None, create_instance_here=False, scheduler_hints=None): """Verify all the input parameters regardless of the provisioning strategy being performed and schedule the instance(s) for creation.""" if not metadata: metadata = {} if not display_description: display_description = '' if not security_group: security_group = 'default' if not instance_type: instance_type = instance_types.get_default_instance_type() if not min_count: min_count = 1 if not max_count: max_count = min_count if not metadata: metadata = {} block_device_mapping = block_device_mapping or [] num_instances = quota.allowed_instances(context, max_count, instance_type) if num_instances < min_count: pid = context.project_id if num_instances <= 0: msg = _("Cannot run any more instances of this type.") else: msg = (_("Can only run %s more instances of this type.") % num_instances) LOG.warn(_("Quota exceeded for %(pid)s," " tried to run %(min_count)s instances. 
" + msg) % locals()) raise exception.QuotaError(code="InstanceLimitExceeded") self._check_metadata_properties_quota(context, metadata) self._check_injected_file_quota(context, injected_files) self._check_requested_networks(context, requested_networks) (image_service, image_id) = nova.image.get_image_service(context, image_href) image = image_service.show(context, image_id) if instance_type['memory_mb'] < int(image.get('min_ram') or 0): raise exception.InstanceTypeMemoryTooSmall() if instance_type['root_gb'] < int(image.get('min_disk') or 0): raise exception.InstanceTypeDiskTooSmall() config_drive_id = None if config_drive and config_drive is not True: # config_drive is volume id config_drive, config_drive_id = None, config_drive os_type = None if 'properties' in image and 'os_type' in image['properties']: os_type = image['properties']['os_type'] architecture = None if 'properties' in image and 'arch' in image['properties']: architecture = image['properties']['arch'] vm_mode = None if 'properties' in image and 'vm_mode' in image['properties']: vm_mode = image['properties']['vm_mode'] # If instance doesn't have auto_disk_config overridden by request, use # whatever the image indicates if auto_disk_config is None: if ('properties' in image and 'auto_disk_config' in image['properties']): auto_disk_config = utils.bool_from_str( image['properties']['auto_disk_config']) if kernel_id is None: kernel_id = image['properties'].get('kernel_id', None) if ramdisk_id is None: ramdisk_id = image['properties'].get('ramdisk_id', None) # FIXME(sirp): is there a way we can remove null_kernel? # No kernel and ramdisk for raw images if kernel_id == str(FLAGS.null_kernel): kernel_id = None ramdisk_id = None LOG.debug(_("Creating a raw instance")) # Make sure we have access to kernel and ramdisk (if not raw) LOG.debug(_("Using Kernel=%(kernel_id)s, Ramdisk=%(ramdisk_id)s") % locals()) if kernel_id: image_service.show(context, kernel_id) if ramdisk_id: image_service.show(context, ramdisk_id) if config_drive_id: image_service.show(context, config_drive_id) self.ensure_default_security_group(context) if key_data is None and key_name: key_pair = self.db.key_pair_get(context, context.user_id, key_name) key_data = key_pair['public_key'] if reservation_id is None: reservation_id = utils.generate_uid('r') root_device_name = block_device.properties_root_device_name( image['properties']) # NOTE(vish): We have a legacy hack to allow admins to specify hosts # via az using az:host. It might be nice to expose an # api to specify specific hosts to force onto, but for # now it just supports this legacy hack. 
host = None if availability_zone: availability_zone, _x, host = availability_zone.partition(':') if not availability_zone: availability_zone = FLAGS.default_schedule_zone if context.is_admin and host: filter_properties = {'force_hosts': [host]} else: filter_properties = {} filter_properties['scheduler_hints'] = scheduler_hints base_options = { 'reservation_id': reservation_id, 'image_ref': image_href, 'kernel_id': kernel_id or '', 'ramdisk_id': ramdisk_id or '', 'power_state': power_state.NOSTATE, 'vm_state': vm_states.BUILDING, 'config_drive_id': config_drive_id or '', 'config_drive': config_drive or '', 'user_id': context.user_id, 'project_id': context.project_id, 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()), 'instance_type_id': instance_type['id'], 'memory_mb': instance_type['memory_mb'], 'vcpus': instance_type['vcpus'], 'root_gb': instance_type['root_gb'], 'ephemeral_gb': instance_type['ephemeral_gb'], 'display_name': display_name, 'display_description': display_description, 'user_data': user_data or '', 'key_name': key_name, 'key_data': key_data, 'locked': False, 'metadata': metadata, 'access_ip_v4': access_ip_v4, 'access_ip_v6': access_ip_v6, 'availability_zone': availability_zone, 'os_type': os_type, 'architecture': architecture, 'vm_mode': vm_mode, 'root_device_name': root_device_name, 'progress': 0, 'auto_disk_config': auto_disk_config} LOG.debug(_("Going to run %s instances...") % num_instances) if create_instance_here: instance = self.create_db_entry_for_new_instance( context, instance_type, image, base_options, security_group, block_device_mapping) # Tells scheduler we created the instance already. base_options['uuid'] = instance['uuid'] rpc_method = rpc.cast else: # We need to wait for the scheduler to create the instance # DB entries, because the instance *could* be # created in # a child zone. rpc_method = rpc.call # TODO(comstud): We should use rpc.multicall when we can # retrieve the full instance dictionary from the scheduler. # Otherwise, we could exceed the AMQP max message size limit. # This would require the schedulers' schedule_run_instances # methods to return an iterator vs a list. instances = self._schedule_run_instance( rpc_method, context, base_options, instance_type, availability_zone, injected_files, admin_password, image, num_instances, requested_networks, block_device_mapping, security_group, filter_properties) if create_instance_here: return ([instance], reservation_id) return (instances, reservation_id) @staticmethod def _volume_size(instance_type, virtual_name): size = 0 if virtual_name == 'swap': size = instance_type.get('swap', 0) elif block_device.is_ephemeral(virtual_name): num = block_device.ephemeral_num(virtual_name) # TODO(yamahata): ephemeralN where N > 0 # Only ephemeral0 is allowed for now because InstanceTypes # table only allows single local disk, ephemeral_gb. # In order to enhance it, we need to add a new columns to # instance_types table. 
if num > 0: return 0 size = instance_type.get('ephemeral_gb') return size def _update_image_block_device_mapping(self, elevated_context, instance_type, instance_id, mappings): """tell vm driver to create ephemeral/swap device at boot time by updating BlockDeviceMapping """ instance_type = (instance_type or instance_types.get_default_instance_type()) for bdm in block_device.mappings_prepend_dev(mappings): LOG.debug(_("bdm %s"), bdm) virtual_name = bdm['virtual'] if virtual_name == 'ami' or virtual_name == 'root': continue if not block_device.is_swap_or_ephemeral(virtual_name): continue size = self._volume_size(instance_type, virtual_name) if size == 0: continue values = { 'instance_id': instance_id, 'device_name': bdm['device'], 'virtual_name': virtual_name, 'volume_size': size} self.db.block_device_mapping_update_or_create(elevated_context, values) def _update_block_device_mapping(self, elevated_context, instance_type, instance_id, block_device_mapping): """tell vm driver to attach volume at boot time by updating BlockDeviceMapping """ LOG.debug(_("block_device_mapping %s"), block_device_mapping) for bdm in block_device_mapping: assert 'device_name' in bdm values = {'instance_id': instance_id} for key in ('device_name', 'delete_on_termination', 'virtual_name', 'snapshot_id', 'volume_id', 'volume_size', 'no_device'): values[key] = bdm.get(key) virtual_name = bdm.get('virtual_name') if (virtual_name is not None and block_device.is_swap_or_ephemeral(virtual_name)): size = self._volume_size(instance_type, virtual_name) if size == 0: continue values['volume_size'] = size # NOTE(yamahata): NoDevice eliminates devices defined in image # files by command line option. # (--block-device-mapping) if virtual_name == 'NoDevice': values['no_device'] = True for k in ('delete_on_termination', 'volume_id', 'snapshot_id', 'volume_id', 'volume_size', 'virtual_name'): values[k] = None self.db.block_device_mapping_update_or_create(elevated_context, values) #NOTE(bcwaldon): No policy check since this is only used by scheduler and # the compute api. That should probably be cleaned up, though. def create_db_entry_for_new_instance(self, context, instance_type, image, base_options, security_group, block_device_mapping): """Create an entry in the DB for this new instance, including any related table updates (such as security group, etc). This is called by the scheduler after a location for the instance has been determined. 
""" elevated = context.elevated() if security_group is None: security_group = ['default'] if not isinstance(security_group, list): security_group = [security_group] security_groups = [] for security_group_name in security_group: group = self.db.security_group_get_by_name(context, context.project_id, security_group_name) security_groups.append(group['id']) base_options.setdefault('launch_index', 0) instance = self.db.instance_create(context, base_options) instance_id = instance['id'] instance_uuid = instance['uuid'] for security_group_id in security_groups: self.db.instance_add_security_group(elevated, instance_uuid, security_group_id) # BlockDeviceMapping table self._update_image_block_device_mapping(elevated, instance_type, instance_id, image['properties'].get('mappings', [])) self._update_block_device_mapping(elevated, instance_type, instance_id, image['properties'].get('block_device_mapping', [])) # override via command line option self._update_block_device_mapping(elevated, instance_type, instance_id, block_device_mapping) # Set sane defaults if not specified updates = {} display_name = instance.get('display_name') if display_name is None: display_name = self._default_display_name(instance_id) hostname = instance.get('hostname') if hostname is None: hostname = display_name updates['display_name'] = display_name updates['hostname'] = utils.sanitize_hostname(hostname) updates['vm_state'] = vm_states.BUILDING updates['task_state'] = task_states.SCHEDULING if (image['properties'].get('mappings', []) or image['properties'].get('block_device_mapping', []) or block_device_mapping): updates['shutdown_terminate'] = False instance = self.update(context, instance, **updates) return instance def _default_display_name(self, instance_id): return "Server %s" % instance_id def _schedule_run_instance(self, rpc_method, context, base_options, instance_type, availability_zone, injected_files, admin_password, image, num_instances, requested_networks, block_device_mapping, security_group, filter_properties): """Send a run_instance request to the schedulers for processing.""" pid = context.project_id uid = context.user_id LOG.debug(_("Sending create to scheduler for %(pid)s/%(uid)s's") % locals()) request_spec = { 'image': utils.to_primitive(image), 'instance_properties': base_options, 'instance_type': instance_type, 'num_instances': num_instances, 'block_device_mapping': block_device_mapping, 'security_group': security_group, } return rpc_method(context, FLAGS.scheduler_topic, {"method": "run_instance", "args": {"topic": FLAGS.compute_topic, "request_spec": request_spec, "admin_password": admin_password, "injected_files": injected_files, "requested_networks": requested_networks, "is_first_time": True, "filter_properties": filter_properties}}) def _check_create_policies(self, context, availability_zone, requested_networks, block_device_mapping): """Check policies for create().""" target = {'project_id': context.project_id, 'user_id': context.user_id, 'availability_zone': availability_zone} check_policy(context, 'create', target) if requested_networks: check_policy(context, 'create:attach_network', target) if block_device_mapping: check_policy(context, 'create:attach_volume', target) def create(self, context, instance_type, image_href, kernel_id=None, ramdisk_id=None, min_count=None, max_count=None, display_name=None, display_description=None, key_name=None, key_data=None, security_group=None, availability_zone=None, user_data=None, metadata=None, injected_files=None, admin_password=None, 
block_device_mapping=None, access_ip_v4=None, access_ip_v6=None, requested_networks=None, config_drive=None, auto_disk_config=None, scheduler_hints=None): """ Provision instances, sending instance information to the scheduler. The scheduler will determine where the instance(s) go and will handle creating the DB entries. Returns a tuple of (instances, reservation_id) where instances could be 'None' or a list of instance dicts depending on if we waited for information from the scheduler or not. """ self._check_create_policies(context, availability_zone, requested_networks, block_device_mapping) # We can create the DB entry for the instance here if we're # only going to create 1 instance. # This speeds up API responses for builds # as we don't need to wait for the scheduler. create_instance_here = max_count == 1 (instances, reservation_id) = self._create_instance( context, instance_type, image_href, kernel_id, ramdisk_id, min_count, max_count, display_name, display_description, key_name, key_data, security_group, availability_zone, user_data, metadata, injected_files, admin_password, access_ip_v4, access_ip_v6, requested_networks, config_drive, block_device_mapping, auto_disk_config, create_instance_here=create_instance_here, scheduler_hints=scheduler_hints) if create_instance_here or instances is None: return (instances, reservation_id) inst_ret_list = [] for instance in instances: if instance.get('_is_precooked', False): inst_ret_list.append(instance) else: # Scheduler only gives us the 'id'. We need to pull # in the created instances from the DB instance = self.db.instance_get(context, instance['id']) inst_ret_list.append(dict(instance.iteritems())) return (inst_ret_list, reservation_id) def ensure_default_security_group(self, context): """Ensure that a context has a security group. Creates a security group for the security context if it does not already exist. :param context: the security context """ try: self.db.security_group_get_by_name(context, context.project_id, 'default') except exception.NotFound: values = {'name': 'default', 'description': 'default', 'user_id': context.user_id, 'project_id': context.project_id} self.db.security_group_create(context, values) self.sgh.trigger_security_group_create_refresh(context, values) def trigger_security_group_rules_refresh(self, context, security_group_id): """Called when a rule is added to or removed from a security_group.""" security_group = self.db.security_group_get(context, security_group_id) hosts = set() for instance in security_group['instances']: if instance['host'] is not None: hosts.add(instance['host']) for host in hosts: rpc.cast(context, self.db.queue_get_for(context, FLAGS.compute_topic, host), {"method": "refresh_security_group_rules", "args": {"security_group_id": security_group.id}}) def trigger_security_group_members_refresh(self, context, group_ids): """Called when a security group gains a new or loses a member. Sends an update request to each compute node for whom this is relevant. """ # First, we get the security group rules that reference these groups as # the grantee.. security_group_rules = set() for group_id in group_ids: security_group_rules.update( self.db.security_group_rule_get_by_security_group_grantee( context, group_id)) # ..then we distill the security groups to which they belong.. 
security_groups = set() for rule in security_group_rules: security_group = self.db.security_group_get( context, rule['parent_group_id']) security_groups.add(security_group) # ..then we find the instances that are members of these groups.. instances = set() for security_group in security_groups: for instance in security_group['instances']: instances.add(instance) # ...then we find the hosts where they live... hosts = set() for instance in instances: if instance['host']: hosts.add(instance['host']) # ...and finally we tell these nodes to refresh their view of this # particular security group. for host in hosts: rpc.cast(context, self.db.queue_get_for(context, FLAGS.compute_topic, host), {"method": "refresh_security_group_members", "args": {"security_group_id": group_id}}) def trigger_provider_fw_rules_refresh(self, context): """Called when a rule is added/removed from a provider firewall""" hosts = [x['host'] for (x, idx) in self.db.service_get_all_compute_sorted(context)] for host in hosts: rpc.cast(context, self.db.queue_get_for(context, FLAGS.compute_topic, host), {'method': 'refresh_provider_fw_rules', 'args': {}}) def _is_security_group_associated_with_server(self, security_group, instance_uuid): """Check if the security group is already associated with the instance. If Yes, return True. """ if not security_group: return False instances = security_group.get('instances') if not instances: return False for inst in instances: if (instance_uuid == inst['uuid']): return True return False @wrap_check_policy def add_security_group(self, context, instance, security_group_name): """Add security group to the instance""" security_group = self.db.security_group_get_by_name(context, context.project_id, security_group_name) instance_uuid = instance['uuid'] #check if the security group is associated with the server if self._is_security_group_associated_with_server(security_group, instance_uuid): raise exception.SecurityGroupExistsForInstance( security_group_id=security_group['id'], instance_id=instance_uuid) #check if the instance is in running state if instance['power_state'] != power_state.RUNNING: raise exception.InstanceNotRunning(instance_id=instance_uuid) self.db.instance_add_security_group(context.elevated(), instance_uuid, security_group['id']) params = {"security_group_id": security_group['id']} # NOTE(comstud): No instance_uuid argument to this compute manager # call self._cast_compute_message('refresh_security_group_rules', context, host=instance['host'], params=params) @wrap_check_policy def remove_security_group(self, context, instance, security_group_name): """Remove the security group associated with the instance""" security_group = self.db.security_group_get_by_name(context, context.project_id, security_group_name) instance_uuid = instance['uuid'] #check if the security group is associated with the server if not self._is_security_group_associated_with_server(security_group, instance_uuid): raise exception.SecurityGroupNotExistsForInstance( security_group_id=security_group['id'], instance_id=instance_uuid) #check if the instance is in running state if instance['power_state'] != power_state.RUNNING: raise exception.InstanceNotRunning(instance_id=instance_uuid) self.db.instance_remove_security_group(context.elevated(), instance_uuid, security_group['id']) params = {"security_group_id": security_group['id']} # NOTE(comstud): No instance_uuid argument to this compute manager # call self._cast_compute_message('refresh_security_group_rules', context, host=instance['host'], params=params) 
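    # NOTE: hypothetical helper, not part of Nova's API class; it sketches
    # the fan-out pattern shared by the security-group refresh paths above:
    # dedupe the hosts of the group's instances, then cast one refresh
    # message per host via the per-host compute queue.
    def _example_refresh_rules_on_hosts(self, context, security_group):
        hosts = set(instance['host']
                    for instance in security_group['instances']
                    if instance['host'] is not None)
        for host in hosts:
            queue = self.db.queue_get_for(context, FLAGS.compute_topic, host)
            rpc.cast(context, queue,
                     {'method': 'refresh_security_group_rules',
                      'args': {'security_group_id': security_group['id']}})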
@wrap_check_policy def update(self, context, instance, **kwargs): """Updates the instance in the datastore. :param context: The security context :param instance: The instance to update :param kwargs: All additional keyword args are treated as data fields of the instance to be updated :returns: None """ rv = self.db.instance_update(context, instance["id"], kwargs) return dict(rv.iteritems()) @wrap_check_policy @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF, vm_states.ERROR]) def soft_delete(self, context, instance): """Terminate an instance.""" LOG.debug(_('Going to try to soft delete instance'), instance=instance) if instance['disable_terminate']: return # NOTE(jerdfelt): The compute daemon handles reclaiming instances # that are in soft delete. If there is no host assigned, there is # no daemon to reclaim, so delete it immediately. host = instance['host'] if host: self.update(context, instance, vm_state=vm_states.SOFT_DELETE, task_state=task_states.POWERING_OFF, deleted_at=utils.utcnow()) self._cast_compute_message('power_off_instance', context, instance) else: LOG.warning(_('No host for instance, deleting immediately'), instance=instance) try: self.db.instance_destroy(context, instance['id']) except exception.InstanceNotFound: # NOTE(comstud): Race condition. Instance already gone. pass def _delete(self, context, instance): host = instance['host'] try: if host: self.update(context, instance, task_state=task_states.DELETING, progress=0) self._cast_compute_message('terminate_instance', context, instance) else: self.db.instance_destroy(context, instance['id']) except exception.InstanceNotFound: # NOTE(comstud): Race condition. Instance already gone. pass # NOTE(jerdfelt): The API implies that only ACTIVE and ERROR are # allowed but the EC2 API appears to allow from RESCUED and STOPPED # too @wrap_check_policy @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.BUILDING, vm_states.ERROR, vm_states.RESCUED, vm_states.SHUTOFF, vm_states.STOPPED]) def delete(self, context, instance): """Terminate an instance.""" LOG.debug(_("Going to try to terminate instance"), instance=instance) if instance['disable_terminate']: return self._delete(context, instance) @wrap_check_policy @check_instance_state(vm_state=[vm_states.SOFT_DELETE]) def restore(self, context, instance): """Restore a previously deleted (but not reclaimed) instance.""" self.update(context, instance, vm_state=vm_states.ACTIVE, task_state=None, deleted_at=None) host = instance['host'] if host: self.update(context, instance, task_state=task_states.POWERING_ON) self._cast_compute_message('power_on_instance', context, instance) @wrap_check_policy @check_instance_state(vm_state=[vm_states.SOFT_DELETE]) def force_delete(self, context, instance): """Force delete a previously deleted (but not reclaimed) instance.""" self._delete(context, instance) @wrap_check_policy @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF, vm_states.RESCUED], task_state=[None, task_states.RESIZE_VERIFY]) def stop(self, context, instance, do_cast=True): """Stop an instance.""" instance_uuid = instance["uuid"] LOG.debug(_("Going to try to stop instance"), instance=instance) self.update(context, instance, vm_state=vm_states.ACTIVE, task_state=task_states.STOPPING, terminated_at=utils.utcnow(), progress=0) rpc_method = rpc.cast if do_cast else rpc.call self._cast_or_call_compute_message(rpc_method, 'stop_instance', context, instance) @wrap_check_policy @check_instance_state(vm_state=[vm_states.STOPPED, vm_states.SHUTOFF]) def 
start(self, context, instance): """Start an instance.""" vm_state = instance["vm_state"] instance_uuid = instance["uuid"] LOG.debug(_("Going to try to start instance"), instance=instance) if vm_state == vm_states.SHUTOFF: if instance['shutdown_terminate']: LOG.warning(_("Instance %(instance_uuid)s is not " "stopped. (%(vm_state)s") % locals()) return # NOTE(yamahata): nova compute doesn't reap instances # which initiated shutdown itself. So reap it here. self.stop(context, instance, do_cast=False) self.update(context, instance, vm_state=vm_states.STOPPED, task_state=task_states.STARTING) # TODO(yamahata): injected_files isn't supported right now. # It is used only for osapi. not for ec2 api. # availability_zone isn't used by run_instance. self._cast_compute_message('start_instance', context, instance) #NOTE(bcwaldon): no policy check here since it should be rolled in to # search_opts in get_all def get_active_by_window(self, context, begin, end=None, project_id=None): """Get instances that were continuously active over a window.""" return self.db.instance_get_active_by_window(context, begin, end, project_id) #NOTE(bcwaldon): this doesn't really belong in this class def get_instance_type(self, context, instance_type_id): """Get an instance type by instance type id.""" return instance_types.get_instance_type(instance_type_id) def get(self, context, instance_id): """Get a single instance with the given instance_id.""" # NOTE(ameade): we still need to support integer ids for ec2 if utils.is_uuid_like(instance_id): instance = self.db.instance_get_by_uuid(context, instance_id) else: instance = self.db.instance_get(context, instance_id) check_policy(context, 'get', instance) inst = dict(instance.iteritems()) # NOTE(comstud): Doesn't get returned with iteritems inst['name'] = instance['name'] return inst def get_all(self, context, search_opts=None, sort_key='created_at', sort_dir='desc'): """Get all instances filtered by one of the given parameters. If there is no filter and the context is an admin, it will retrieve all instances in the system. Deleted instances will be returned by default, unless there is a search option that says otherwise. The results will be returned sorted in the order specified by the 'sort_dir' parameter using the key specified in the 'sort_key' parameter. """ #TODO(bcwaldon): determine the best argument for target here target = { 'project_id': context.project_id, 'user_id': context.user_id, } check_policy(context, "get_all", target) if search_opts is None: search_opts = {} LOG.debug(_("Searching by: %s") % str(search_opts)) # Fixups for the DB call filters = {} def _remap_flavor_filter(flavor_id): try: instance_type = instance_types.get_instance_type_by_flavor_id( flavor_id) except exception.FlavorNotFound: raise ValueError() filters['instance_type_id'] = instance_type['id'] def _remap_fixed_ip_filter(fixed_ip): # Turn fixed_ip into a regexp match. Since '.' matches # any character, we need to use regexp escaping for it. filters['ip'] = '^%s$' % fixed_ip.replace('.', '\\.') # search_option to filter_name mapping. filter_mapping = { 'image': 'image_ref', 'name': 'display_name', 'instance_name': 'name', 'tenant_id': 'project_id', 'flavor': _remap_flavor_filter, 'fixed_ip': _remap_fixed_ip_filter} # copy from search_opts, doing various remappings as necessary for opt, value in search_opts.iteritems(): # Do remappings. # Values not in the filter_mapping table are copied as-is. 
# If remapping is None, option is not copied # If the remapping is a string, it is the filter_name to use try: remap_object = filter_mapping[opt] except KeyError: filters[opt] = value else: # Remaps are strings to translate to, or functions to call # to do the translating as defined by the table above. if isinstance(remap_object, basestring): filters[remap_object] = value else: try: remap_object(value) # We already know we can't match the filter, so # return an empty list except ValueError: return [] inst_models = self._get_instances_by_filters(context, filters, sort_key, sort_dir) # Convert the models to dictionaries instances = [] for inst_model in inst_models: instance = dict(inst_model.iteritems()) # NOTE(comstud): Doesn't get returned by iteritems instance['name'] = inst_model['name'] instances.append(instance) return instances def _get_instances_by_filters(self, context, filters, sort_key, sort_dir): if 'ip6' in filters or 'ip' in filters: res = self.network_api.get_instance_uuids_by_ip_filter(context, filters) # NOTE(jkoelker) It is possible that we will get the same # instance uuid twice (one for ipv4 and ipv6) uuids = set([r['instance_uuid'] for r in res]) filters['uuid'] = uuids return self.db.instance_get_all_by_filters(context, filters, sort_key, sort_dir) @wrap_check_policy @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF]) def backup(self, context, instance, name, backup_type, rotation, extra_properties=None): """Backup the given instance :param instance: nova.db.sqlalchemy.models.Instance :param name: name of the backup or snapshot name = backup_type # daily backups are called 'daily' :param rotation: int representing how many backups to keep around; None if rotation shouldn't be used (as in the case of snapshots) :param extra_properties: dict of extra image properties to include """ recv_meta = self._create_image(context, instance, name, 'backup', backup_type=backup_type, rotation=rotation, extra_properties=extra_properties) return recv_meta @wrap_check_policy @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF]) def snapshot(self, context, instance, name, extra_properties=None): """Snapshot the given instance. :param instance: nova.db.sqlalchemy.models.Instance :param name: name of the backup or snapshot :param extra_properties: dict of extra image properties to include :returns: A dict containing image metadata """ return self._create_image(context, instance, name, 'snapshot', extra_properties=extra_properties) def _create_image(self, context, instance, name, image_type, backup_type=None, rotation=None, extra_properties=None): """Create snapshot or backup for an instance on this host. 
:param context: security context :param instance: nova.db.sqlalchemy.models.Instance :param name: string for name of the snapshot :param image_type: snapshot | backup :param backup_type: daily | weekly :param rotation: int representing how many backups to keep around; None if rotation shouldn't be used (as in the case of snapshots) :param extra_properties: dict of extra image properties to include """ instance_uuid = instance['uuid'] if image_type == "snapshot": task_state = task_states.IMAGE_SNAPSHOT elif image_type == "backup": task_state = task_states.IMAGE_BACKUP else: raise Exception(_('Image type not recognized %s') % image_type) self.db.instance_test_and_set( context, instance_uuid, 'task_state', [None], task_state) properties = { 'instance_uuid': instance_uuid, 'user_id': str(context.user_id), 'image_type': image_type, } sent_meta = {'name': name, 'is_public': False} if image_type == 'backup': properties['backup_type'] = backup_type elif image_type == 'snapshot': min_ram, min_disk = self._get_minram_mindisk_params(context, instance) if min_ram is not None: sent_meta['min_ram'] = min_ram if min_disk is not None: sent_meta['min_disk'] = min_disk properties.update(extra_properties or {}) sent_meta['properties'] = properties recv_meta = self.image_service.create(context, sent_meta) params = {'image_id': recv_meta['id'], 'image_type': image_type, 'backup_type': backup_type, 'rotation': rotation} self._cast_compute_message('snapshot_instance', context, instance, params=params) return recv_meta def _get_minram_mindisk_params(self, context, instance): try: #try to get source image of the instance orig_image = self.image_service.show(context, instance['image_ref']) except exception.ImageNotFound: return None, None #disk format of vhd is non-shrinkable if orig_image.get('disk_format') == 'vhd': min_ram = instance['instance_type']['memory_mb'] min_disk = instance['instance_type']['root_gb'] else: #set new image values to the original image values min_ram = orig_image.get('min_ram') min_disk = orig_image.get('min_disk') return min_ram, min_disk @wrap_check_policy @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF, vm_states.RESCUED], task_state=[None, task_states.RESIZE_VERIFY]) def reboot(self, context, instance, reboot_type): """Reboot the given instance.""" state = {'SOFT': task_states.REBOOTING, 'HARD': task_states.REBOOTING_HARD}[reboot_type] self.update(context, instance, vm_state=vm_states.ACTIVE, task_state=state) self._cast_compute_message('reboot_instance', context, instance, params={'reboot_type': reboot_type}) def _validate_image_href(self, context, image_href): """Throws an ImageNotFound exception if image_href does not exist.""" (image_service, image_id) = nova.image.get_image_service(context, image_href) image_service.show(context, image_id) @wrap_check_policy @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF], task_state=[None, task_states.RESIZE_VERIFY]) def rebuild(self, context, instance, image_href, admin_password, **kwargs): """Rebuild the given instance with the provided attributes.""" self._validate_image_href(context, image_href) files_to_inject = kwargs.pop('files_to_inject', []) self._check_injected_file_quota(context, files_to_inject) metadata = kwargs.get('metadata', {}) self._check_metadata_properties_quota(context, metadata) self.update(context, instance, image_ref=image_href, vm_state=vm_states.REBUILDING, task_state=None, progress=0, **kwargs) rebuild_params = { "new_pass": admin_password, "injected_files": files_to_inject, } 
self._cast_compute_message('rebuild_instance', context, instance, params=rebuild_params) @wrap_check_policy @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF], task_state=[task_states.RESIZE_VERIFY]) def revert_resize(self, context, instance): """Reverts a resize, deleting the 'new' instance in the process.""" context = context.elevated() migration_ref = self.db.migration_get_by_instance_and_status(context, instance['uuid'], 'finished') if not migration_ref: raise exception.MigrationNotFoundByStatus( instance_id=instance['uuid'], status='finished') self.update(context, instance, vm_state=vm_states.RESIZING, task_state=task_states.RESIZE_REVERTING) params = {'migration_id': migration_ref['id']} self._cast_compute_message('revert_resize', context, instance, host=migration_ref['dest_compute'], params=params) self.db.migration_update(context, migration_ref['id'], {'status': 'reverted'}) @wrap_check_policy @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF], task_state=[task_states.RESIZE_VERIFY]) def confirm_resize(self, context, instance): """Confirms a migration/resize and deletes the 'old' instance.""" context = context.elevated() migration_ref = self.db.migration_get_by_instance_and_status(context, instance['uuid'], 'finished') if not migration_ref: raise exception.MigrationNotFoundByStatus( instance_id=instance['uuid'], status='finished') self.update(context, instance, vm_state=vm_states.ACTIVE, task_state=None) params = {'migration_id': migration_ref['id']} self._cast_compute_message('confirm_resize', context, instance, host=migration_ref['source_compute'], params=params) self.db.migration_update(context, migration_ref['id'], {'status': 'confirmed'}) self.db.instance_update(context, instance['uuid'], {'host': migration_ref['dest_compute'], }) @wrap_check_policy @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF], task_state=[None]) def resize(self, context, instance, flavor_id=None, **kwargs): """Resize (ie, migrate) a running instance. If flavor_id is None, the process is considered a migration, keeping the original flavor_id. If flavor_id is not None, the instance should be migrated to a new host and resized to the new flavor_id. """ current_instance_type = instance['instance_type'] # If flavor_id is not provided, only migrate the instance. if not flavor_id: LOG.debug(_("flavor_id is None. 
Assuming migration.")) new_instance_type = current_instance_type else: new_instance_type = instance_types.get_instance_type_by_flavor_id( flavor_id) current_instance_type_name = current_instance_type['name'] new_instance_type_name = new_instance_type['name'] LOG.debug(_("Old instance type %(current_instance_type_name)s, " " new instance type %(new_instance_type_name)s") % locals()) if not new_instance_type: raise exception.FlavorNotFound(flavor_id=flavor_id) # NOTE(markwash): look up the image early to avoid auth problems later image = self.image_service.show(context, instance['image_ref']) current_memory_mb = current_instance_type['memory_mb'] new_memory_mb = new_instance_type['memory_mb'] if (current_memory_mb == new_memory_mb) and flavor_id: raise exception.CannotResizeToSameSize() self.update(context, instance, vm_state=vm_states.RESIZING, task_state=task_states.RESIZE_PREP, progress=0, **kwargs) request_spec = { 'instance_type': new_instance_type, 'num_instances': 1, 'instance_properties': instance} filter_properties = {'ignore_hosts': []} if not FLAGS.allow_resize_to_same_host: filter_properties['ignore_hosts'].append(instance['host']) args = { "topic": FLAGS.compute_topic, "instance_uuid": instance['uuid'], "instance_type_id": new_instance_type['id'], "image": image, "update_db": False, "request_spec": utils.to_primitive(request_spec), "filter_properties": filter_properties, } self._cast_scheduler_message(context, {"method": "prep_resize", "args": args}) @wrap_check_policy def add_fixed_ip(self, context, instance, network_id): """Add fixed_ip from specified network to given instance.""" self._cast_compute_message('add_fixed_ip_to_instance', context, instance, params=dict(network_id=network_id)) @wrap_check_policy def remove_fixed_ip(self, context, instance, address): """Remove fixed_ip from specified network to given instance.""" self._cast_compute_message('remove_fixed_ip_from_instance', context, instance, params=dict(address=address)) @wrap_check_policy @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF, vm_states.RESCUED], task_state=[None, task_states.RESIZE_VERIFY]) def pause(self, context, instance): """Pause the given instance.""" self.update(context, instance, vm_state=vm_states.ACTIVE, task_state=task_states.PAUSING) self._cast_compute_message('pause_instance', context, instance) @wrap_check_policy @check_instance_state(vm_state=[vm_states.PAUSED]) def unpause(self, context, instance): """Unpause the given instance.""" self.update(context, instance, vm_state=vm_states.PAUSED, task_state=task_states.UNPAUSING) self._cast_compute_message('unpause_instance', context, instance) @wrap_check_policy def get_diagnostics(self, context, instance): """Retrieve diagnostics for the given instance.""" return self._call_compute_message("get_diagnostics", context, instance) @wrap_check_policy def get_actions(self, context, instance): """Retrieve actions for the given instance.""" return self.db.instance_get_actions(context, instance['uuid']) @wrap_check_policy @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF, vm_states.RESCUED], task_state=[None, task_states.RESIZE_VERIFY]) def suspend(self, context, instance): """Suspend the given instance.""" self.update(context, instance, vm_state=vm_states.ACTIVE, task_state=task_states.SUSPENDING) self._cast_compute_message('suspend_instance', context, instance) @wrap_check_policy @check_instance_state(vm_state=[vm_states.SUSPENDED]) def resume(self, context, instance): """Resume the given instance.""" 
self.update(context, instance, vm_state=vm_states.SUSPENDED, task_state=task_states.RESUMING) self._cast_compute_message('resume_instance', context, instance) @wrap_check_policy @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF, vm_states.STOPPED], task_state=[None, task_states.RESIZE_VERIFY]) def rescue(self, context, instance, rescue_password=None): """Rescue the given instance.""" self.update(context, instance, vm_state=vm_states.ACTIVE, task_state=task_states.RESCUING) rescue_params = { "rescue_password": rescue_password } self._cast_compute_message('rescue_instance', context, instance, params=rescue_params) @wrap_check_policy @check_instance_state(vm_state=[vm_states.RESCUED]) def unrescue(self, context, instance): """Unrescue the given instance.""" self.update(context, instance, vm_state=vm_states.RESCUED, task_state=task_states.UNRESCUING) self._cast_compute_message('unrescue_instance', context, instance) @wrap_check_policy @check_instance_state(vm_state=[vm_states.ACTIVE]) def set_admin_password(self, context, instance, password=None): """Set the root/admin password for the given instance.""" self.update(context, instance, task_state=task_states.UPDATING_PASSWORD) params = {"new_pass": password} self._cast_compute_message('set_admin_password', context, instance, params=params) @wrap_check_policy def inject_file(self, context, instance, path, file_contents): """Write a file to the given instance.""" params = {'path': path, 'file_contents': file_contents} self._cast_compute_message('inject_file', context, instance, params=params) @wrap_check_policy def get_vnc_console(self, context, instance, console_type): """Get a url to an instance Console.""" connect_info = self._call_compute_message('get_vnc_console', context, instance, params={"console_type": console_type}) rpc.call(context, '%s' % FLAGS.consoleauth_topic, {'method': 'authorize_console', 'args': {'token': connect_info['token'], 'console_type': console_type, 'host': connect_info['host'], 'port': connect_info['port'], 'internal_access_path': connect_info['internal_access_path']}}) return {'url': connect_info['access_url']} @wrap_check_policy def get_console_output(self, context, instance, tail_length=None): """Get console output for an an instance.""" params = {'tail_length': tail_length} return self._call_compute_message('get_console_output', context, instance, params=params) @wrap_check_policy def lock(self, context, instance): """Lock the given instance.""" self._cast_compute_message('lock_instance', context, instance) @wrap_check_policy def unlock(self, context, instance): """Unlock the given instance.""" self._cast_compute_message('unlock_instance', context, instance) @wrap_check_policy def get_lock(self, context, instance): """Return the boolean state of given instance's lock.""" return self.get(context, instance['uuid'])['locked'] @wrap_check_policy def reset_network(self, context, instance): """Reset networking on the instance.""" self._cast_compute_message('reset_network', context, instance) @wrap_check_policy def inject_network_info(self, context, instance): """Inject network info for the instance.""" self._cast_compute_message('inject_network_info', context, instance) @wrap_check_policy def attach_volume(self, context, instance, volume_id, device): """Attach an existing volume to an existing instance.""" if not re.match("^/dev/x{0,1}[a-z]d[a-z]+$", device): raise exception.InvalidDevicePath(path=device) volume = self.volume_api.get(context, volume_id) self.volume_api.check_attach(context, volume) 
self.volume_api.reserve_volume(context, volume) params = {"volume_id": volume_id, "mountpoint": device} self._cast_compute_message('attach_volume', context, instance, params=params) # FIXME(comstud): I wonder if API should pull in the instance from # the volume ID via volume API and pass it and the volume object here def detach_volume(self, context, volume_id): """Detach a volume from an instance.""" instance = self.db.volume_get_instance(context.elevated(), volume_id) if not instance: raise exception.VolumeUnattached(volume_id=volume_id) check_policy(context, 'detach_volume', instance) volume = self.volume_api.get(context, volume_id) self.volume_api.check_detach(context, volume) params = {'volume_id': volume_id} self._cast_compute_message('detach_volume', context, instance, params=params) return instance @wrap_check_policy def associate_floating_ip(self, context, instance, address): """Makes calls to network_api to associate_floating_ip. :param address: is a string floating ip address """ instance_uuid = instance['uuid'] # TODO(tr3buchet): currently network_info doesn't contain floating IPs # in its info, if this changes, the next few lines will need to # accommodate the info containing floating as well as fixed ip # addresses nw_info = self.network_api.get_instance_nw_info(context.elevated(), instance) if not nw_info: raise exception.FixedIpNotFoundForInstance( instance_id=instance_uuid) ips = [ip for ip in nw_info[0].fixed_ips()] if not ips: raise exception.FixedIpNotFoundForInstance( instance_id=instance_uuid) # TODO(tr3buchet): this will associate the floating IP with the # first fixed_ip (lowest id) an instance has. This should be # changed to support specifying a particular fixed_ip if # multiple exist. if len(ips) > 1: msg = _('multiple fixedips exist, using the first: %s') LOG.warning(msg, ips[0]['address']) self.network_api.associate_floating_ip(context, floating_address=address, fixed_address=ips[0]['address']) self.network_api.invalidate_instance_cache(context.elevated(), instance) @wrap_check_policy def get_instance_metadata(self, context, instance): """Get all metadata associated with an instance.""" rv = self.db.instance_metadata_get(context, instance['id']) return dict(rv.iteritems()) @wrap_check_policy def delete_instance_metadata(self, context, instance, key): """Delete the given metadata item from an instance.""" self.db.instance_metadata_delete(context, instance['id'], key) @wrap_check_policy def update_instance_metadata(self, context, instance, metadata, delete=False): """Updates or creates instance metadata. If delete is True, metadata items that are not specified in the `metadata` argument will be deleted. 
""" if delete: _metadata = metadata else: _metadata = self.get_instance_metadata(context, instance) _metadata.update(metadata) self._check_metadata_properties_quota(context, _metadata) self.db.instance_metadata_update(context, instance['id'], _metadata, True) return _metadata def get_instance_faults(self, context, instances): """Get all faults for a list of instance uuids.""" if not instances: return {} for instance in instances: check_policy(context, 'get_instance_faults', instance) uuids = [instance['uuid'] for instance in instances] return self.db.instance_fault_get_by_instance_uuids(context, uuids) class HostAPI(BaseAPI): """Sub-set of the Compute Manager API for managing host operations.""" def set_host_enabled(self, context, host, enabled): """Sets the specified host's ability to accept new instances.""" # NOTE(comstud): No instance_uuid argument to this compute manager # call return self._call_compute_message("set_host_enabled", context, host=host, params={"enabled": enabled}) def host_power_action(self, context, host, action): """Reboots, shuts down or powers up the host.""" # NOTE(comstud): No instance_uuid argument to this compute manager # call return self._call_compute_message("host_power_action", context, host=host, params={"action": action}) def set_host_maintenance(self, context, host, mode): """Start/Stop host maintenance window. On start, it triggers guest VMs evacuation.""" return self._call_compute_message("host_maintenance_mode", context, host=host, params={"host": host, "mode": mode}) class AggregateAPI(base.Base): """Sub-set of the Compute Manager API for managing host aggregates.""" def __init__(self, **kwargs): super(AggregateAPI, self).__init__(**kwargs) def create_aggregate(self, context, aggregate_name, availability_zone): """Creates the model for the aggregate.""" zones = [s.availability_zone for s in self.db.service_get_all_by_topic(context, FLAGS.compute_topic)] if availability_zone in zones: values = {"name": aggregate_name, "availability_zone": availability_zone} aggregate = self.db.aggregate_create(context, values) return dict(aggregate.iteritems()) else: raise exception.InvalidAggregateAction(action='create_aggregate', aggregate_id="'N/A'", reason='invalid zone') def get_aggregate(self, context, aggregate_id): """Get an aggregate by id.""" aggregate = self.db.aggregate_get(context, aggregate_id) return self._get_aggregate_info(context, aggregate) def get_aggregate_list(self, context): """Get all the aggregates for this zone.""" aggregates = self.db.aggregate_get_all(context, read_deleted="no") return [self._get_aggregate_info(context, a) for a in aggregates] def update_aggregate(self, context, aggregate_id, values): """Update the properties of an aggregate.""" aggregate = self.db.aggregate_update(context, aggregate_id, values) return self._get_aggregate_info(context, aggregate) def update_aggregate_metadata(self, context, aggregate_id, metadata): """Updates the aggregate metadata. If a key is set to None, it gets removed from the aggregate metadata. """ # As a first release of the host aggregates blueprint, this call is # pretty dumb, in the sense that interacts only with the model. # In later releasses, updating metadata may trigger virt actions like # the setup of shared storage, or more generally changes to the # underlying hypervisor pools. 
for key in metadata.keys(): if not metadata[key]: try: self.db.aggregate_metadata_delete(context, aggregate_id, key) metadata.pop(key) except exception.AggregateMetadataNotFound, e: LOG.warn(e.message) self.db.aggregate_metadata_add(context, aggregate_id, metadata) return self.get_aggregate(context, aggregate_id) def delete_aggregate(self, context, aggregate_id): """Deletes the aggregate.""" hosts = self.db.aggregate_host_get_all(context, aggregate_id, read_deleted="no") if len(hosts) > 0: raise exception.InvalidAggregateAction(action='delete', aggregate_id=aggregate_id, reason='not empty') self.db.aggregate_delete(context, aggregate_id) def add_host_to_aggregate(self, context, aggregate_id, host): """Adds the host to an aggregate.""" # validates the host; ComputeHostNotFound is raised if invalid service = self.db.service_get_all_compute_by_host(context, host)[0] # add host, and reflects action in the aggregate operational state aggregate = self.db.aggregate_get(context, aggregate_id) if aggregate.operational_state in [aggregate_states.CREATED, aggregate_states.ACTIVE]: if service.availability_zone != aggregate.availability_zone: raise exception.InvalidAggregateAction( action='add host', aggregate_id=aggregate_id, reason='availibility zone mismatch') self.db.aggregate_host_add(context, aggregate_id, host) if aggregate.operational_state == aggregate_states.CREATED: values = {'operational_state': aggregate_states.CHANGING} self.db.aggregate_update(context, aggregate_id, values) queue = self.db.queue_get_for(context, service.topic, host) rpc.cast(context, queue, {"method": "add_aggregate_host", "args": {"aggregate_id": aggregate_id, "host": host}, }) return self.get_aggregate(context, aggregate_id) else: invalid = {aggregate_states.CHANGING: 'setup in progress', aggregate_states.DISMISSED: 'aggregate deleted', aggregate_states.ERROR: 'aggregate in error', } if aggregate.operational_state in invalid.keys(): raise exception.InvalidAggregateAction( action='add host', aggregate_id=aggregate_id, reason=invalid[aggregate.operational_state]) def remove_host_from_aggregate(self, context, aggregate_id, host): """Removes host from the aggregate.""" # validates the host; ComputeHostNotFound is raised if invalid service = self.db.service_get_all_compute_by_host(context, host)[0] aggregate = self.db.aggregate_get(context, aggregate_id) if aggregate.operational_state in [aggregate_states.ACTIVE, aggregate_states.ERROR]: self.db.aggregate_host_delete(context, aggregate_id, host) queue = self.db.queue_get_for(context, service.topic, host) rpc.cast(context, queue, {"method": "remove_aggregate_host", "args": {"aggregate_id": aggregate_id, "host": host}, }) return self.get_aggregate(context, aggregate_id) else: invalid = {aggregate_states.CREATED: 'no hosts to remove', aggregate_states.CHANGING: 'setup in progress', aggregate_states.DISMISSED: 'aggregate deleted', } if aggregate.operational_state in invalid.keys(): raise exception.InvalidAggregateAction( action='remove host', aggregate_id=aggregate_id, reason=invalid[aggregate.operational_state]) def _get_aggregate_info(self, context, aggregate): """Builds a dictionary with aggregate props, metadata and hosts.""" metadata = self.db.aggregate_metadata_get(context, aggregate.id) hosts = self.db.aggregate_host_get_all(context, aggregate.id, read_deleted="no") result = dict(aggregate.iteritems()) result["metadata"] = metadata result["hosts"] = hosts return result
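# NOTE: a standalone sketch (not Nova code) of the state-table check used by
# add_host_to_aggregate()/remove_host_from_aggregate() above: an action is
# permitted only in certain operational states, and every forbidden state
# maps to a human-readable reason. Names and the ValueError are illustrative.
def check_aggregate_action(action, state, allowed, invalid_reasons):
    """Raise ValueError unless 'state' permits 'action'."""
    if state in allowed:
        return
    reason = invalid_reasons.get(state, 'unknown state %s' % state)
    raise ValueError('cannot %s: %s' % (action, reason))

# Example, mirroring the remove-host rules above (lower-cased state names
# stand in for the aggregate_states constants):
#   check_aggregate_action(
#       'remove host', 'changing',
#       allowed=('active', 'error'),
#       invalid_reasons={'created': 'no hosts to remove',
#                        'changing': 'setup in progress',
#                        'dismissed': 'aggregate deleted'})
#   => ValueError: cannot remove host: setup in progress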
./CrossVul/dataset_final_sorted/CWE-264/py/bad_5538_0
crossvul-python_data_bad_5538_1
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Nova base exception handling. Includes decorator for re-raising Nova-type exceptions. SHOULD include dedicated exception logging. """ import functools import sys import novaclient.exceptions import webob.exc from nova import log as logging LOG = logging.getLogger(__name__) class ConvertedException(webob.exc.WSGIHTTPException): def __init__(self, code=0, title="", explanation=""): self.code = code self.title = title self.explanation = explanation super(ConvertedException, self).__init__() class ProcessExecutionError(IOError): def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None, description=None): self.exit_code = exit_code self.stderr = stderr self.stdout = stdout self.cmd = cmd self.description = description if description is None: description = _('Unexpected error while running command.') if exit_code is None: exit_code = '-' message = _('%(description)s\nCommand: %(cmd)s\n' 'Exit code: %(exit_code)s\nStdout: %(stdout)r\n' 'Stderr: %(stderr)r') % locals() IOError.__init__(self, message) class Error(Exception): pass class EC2APIError(Error): def __init__(self, message='Unknown', code=None): self.msg = message self.code = code if code: outstr = '%s: %s' % (code, message) else: outstr = '%s' % message super(EC2APIError, self).__init__(outstr) class DBError(Error): """Wraps an implementation specific exception.""" def __init__(self, inner_exception=None): self.inner_exception = inner_exception super(DBError, self).__init__(str(inner_exception)) def wrap_db_error(f): def _wrap(*args, **kwargs): try: return f(*args, **kwargs) except UnicodeEncodeError: raise InvalidUnicodeParameter() except Exception, e: LOG.exception(_('DB exception wrapped.')) raise DBError(e) _wrap.func_name = f.func_name return _wrap def wrap_exception(notifier=None, publisher_id=None, event_type=None, level=None): """This decorator wraps a method to catch any exceptions that may get thrown. It logs the exception as well as optionally sending it to the notification system. """ # TODO(sandy): Find a way to import nova.notifier.api so we don't have # to pass it in as a parameter. Otherwise we get a cyclic import of # nova.notifier.api -> nova.utils -> nova.exception :( # TODO(johannes): Also, it would be nice to use # utils.save_and_reraise_exception() without an import loop def inner(f): def wrapped(*args, **kw): try: return f(*args, **kw) except Exception, e: # Save exception since it can be clobbered during processing # below before we can re-raise exc_info = sys.exc_info() if notifier: payload = dict(args=args, exception=e) payload.update(kw) # Use a temp vars so we don't shadow # our outer definitions. 
temp_level = level if not temp_level: temp_level = notifier.ERROR temp_type = event_type if not temp_type: # If f has multiple decorators, they must use # functools.wraps to ensure the name is # propagated. temp_type = f.__name__ notifier.notify(publisher_id, temp_type, temp_level, payload) # re-raise original exception since it may have been clobbered raise exc_info[0], exc_info[1], exc_info[2] return functools.wraps(f)(wrapped) return inner class NovaException(Exception): """Base Nova Exception To correctly use this class, inherit from it and define a 'message' property. That message will get printf'd with the keyword arguments provided to the constructor. """ message = _("An unknown exception occurred.") code = 500 headers = {} safe = False def __init__(self, message=None, **kwargs): self.kwargs = kwargs if 'code' not in self.kwargs: try: self.kwargs['code'] = self.code except AttributeError: pass if not message: try: message = self.message % kwargs except Exception as e: # at least get the core message out if something happened message = self.message super(NovaException, self).__init__(message) class DecryptionFailure(NovaException): message = _("Failed to decrypt text") class ImagePaginationFailed(NovaException): message = _("Failed to paginate through images from image service") class VirtualInterfaceCreateException(NovaException): message = _("Virtual Interface creation failed") class VirtualInterfaceMacAddressException(NovaException): message = _("5 attempts to create virtual interface" "with unique mac address failed") class GlanceConnectionFailed(NovaException): message = _("Connection to glance failed") + ": %(reason)s" class MelangeConnectionFailed(NovaException): message = _("Connection to melange failed") + ": %(reason)s" class NotAuthorized(NovaException): message = _("Not authorized.") code = 403 class AdminRequired(NotAuthorized): message = _("User does not have admin privileges") class PolicyNotAuthorized(NotAuthorized): message = _("Policy doesn't allow %(action)s to be performed.") class ImageNotAuthorized(NovaException): message = _("Not authorized for image %(image_id)s.") class Invalid(NovaException): message = _("Unacceptable parameters.") code = 400 class InvalidSnapshot(Invalid): message = _("Invalid snapshot") + ": %(reason)s" class VolumeUnattached(Invalid): message = _("Volume %(volume_id)s is not attached to anything") class InvalidKeypair(Invalid): message = _("Keypair data is invalid") class SfJsonEncodeFailure(NovaException): message = _("Failed to load data into json format") class InvalidRequest(Invalid): message = _("The request is invalid.") class InvalidSignature(Invalid): message = _("Invalid signature %(signature)s for user %(user)s.") class InvalidInput(Invalid): message = _("Invalid input received") + ": %(reason)s" class InvalidInstanceType(Invalid): message = _("Invalid instance type %(instance_type)s.") class InvalidVolumeType(Invalid): message = _("Invalid volume type") + ": %(reason)s" class InvalidVolume(Invalid): message = _("Invalid volume") + ": %(reason)s" class InvalidPortRange(Invalid): message = _("Invalid port range %(from_port)s:%(to_port)s. 
%(msg)s") class InvalidIpProtocol(Invalid): message = _("Invalid IP protocol %(protocol)s.") class InvalidContentType(Invalid): message = _("Invalid content type %(content_type)s.") class InvalidCidr(Invalid): message = _("Invalid cidr %(cidr)s.") class InvalidRPCConnectionReuse(Invalid): message = _("Invalid reuse of an RPC connection.") class InvalidUnicodeParameter(Invalid): message = _("Invalid Parameter: " "Unicode is not supported by the current database.") # Cannot be templated as the error syntax varies. # msg needs to be constructed when raised. class InvalidParameterValue(Invalid): message = _("%(err)s") class InvalidAggregateAction(Invalid): message = _("Cannot perform action '%(action)s' on aggregate " "%(aggregate_id)s. Reason: %(reason)s.") class InvalidGroup(Invalid): message = _("Group not valid. Reason: %(reason)s") class InstanceInvalidState(Invalid): message = _("Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot " "%(method)s while the instance is in this state.") class InstanceNotRunning(Invalid): message = _("Instance %(instance_id)s is not running.") class InstanceNotSuspended(Invalid): message = _("Instance %(instance_id)s is not suspended.") class InstanceNotInRescueMode(Invalid): message = _("Instance %(instance_id)s is not in rescue mode") class InstanceSuspendFailure(Invalid): message = _("Failed to suspend instance") + ": %(reason)s" class InstanceResumeFailure(Invalid): message = _("Failed to resume server") + ": %(reason)s." class InstanceRebootFailure(Invalid): message = _("Failed to reboot instance") + ": %(reason)s" class InstanceTerminationFailure(Invalid): message = _("Failed to terminate instance") + ": %(reason)s" class ServiceUnavailable(Invalid): message = _("Service is unavailable at this time.") class VolumeServiceUnavailable(ServiceUnavailable): message = _("Volume service is unavailable at this time.") class ComputeServiceUnavailable(ServiceUnavailable): message = _("Compute service is unavailable at this time.") class UnableToMigrateToSelf(Invalid): message = _("Unable to migrate instance (%(instance_id)s) " "to current host (%(host)s).") class DestinationHostUnavailable(Invalid): message = _("Destination compute host is unavailable at this time.") class SourceHostUnavailable(Invalid): message = _("Original compute host is unavailable at this time.") class InvalidHypervisorType(Invalid): message = _("The supplied hypervisor type of is invalid.") class DestinationHypervisorTooOld(Invalid): message = _("The instance requires a newer hypervisor version than " "has been provided.") class DestinationDiskExists(Invalid): message = _("The supplied disk path (%(path)s) already exists, " "it is expected not to exist.") class InvalidDevicePath(Invalid): message = _("The supplied device path (%(path)s) is invalid.") class DeviceIsBusy(Invalid): message = _("The supplied device (%(device)s) is busy.") class InvalidCPUInfo(Invalid): message = _("Unacceptable CPU info") + ": %(reason)s" class InvalidIpAddressError(Invalid): message = _("%(address)s is not a valid IP v4/6 address.") class InvalidVLANTag(Invalid): message = _("VLAN tag is not appropriate for the port group " "%(bridge)s. Expected VLAN tag is %(tag)s, " "but the one associated with the port group is %(pgroup)s.") class InvalidVLANPortGroup(Invalid): message = _("vSwitch which contains the port group %(bridge)s is " "not associated with the desired physical adapter. 
" "Expected vSwitch is %(expected)s, but the one associated " "is %(actual)s.") class InvalidDiskFormat(Invalid): message = _("Disk format %(disk_format)s is not acceptable") class ImageUnacceptable(Invalid): message = _("Image %(image_id)s is unacceptable: %(reason)s") class InstanceUnacceptable(Invalid): message = _("Instance %(instance_id)s is unacceptable: %(reason)s") class InvalidEc2Id(Invalid): message = _("Ec2 id %(ec2_id)s is unacceptable.") class NotFound(NovaException): message = _("Resource could not be found.") code = 404 class FlagNotSet(NotFound): message = _("Required flag %(flag)s not set.") class VolumeNotFound(NotFound): message = _("Volume %(volume_id)s could not be found.") class SfAccountNotFound(NotFound): message = _("Unable to locate account %(account_name)s on " "Solidfire device") class VolumeNotFoundForInstance(VolumeNotFound): message = _("Volume not found for instance %(instance_id)s.") class VolumeMetadataNotFound(NotFound): message = _("Volume %(volume_id)s has no metadata with " "key %(metadata_key)s.") class NoVolumeTypesFound(NotFound): message = _("Zero volume types found.") class VolumeTypeNotFound(NotFound): message = _("Volume type %(volume_type_id)s could not be found.") class VolumeTypeNotFoundByName(VolumeTypeNotFound): message = _("Volume type with name %(volume_type_name)s " "could not be found.") class VolumeTypeExtraSpecsNotFound(NotFound): message = _("Volume Type %(volume_type_id)s has no extra specs with " "key %(extra_specs_key)s.") class SnapshotNotFound(NotFound): message = _("Snapshot %(snapshot_id)s could not be found.") class VolumeIsBusy(NovaException): message = _("deleting volume %(volume_name)s that has snapshot") class SnapshotIsBusy(NovaException): message = _("deleting snapshot %(snapshot_name)s that has " "dependent volumes") class ISCSITargetNotFoundForVolume(NotFound): message = _("No target id found for volume %(volume_id)s.") class DiskNotFound(NotFound): message = _("No disk at %(location)s") class VolumeDriverNotFound(NotFound): message = _("Could not find a handler for %(driver_type)s volume.") class InvalidImageRef(Invalid): message = _("Invalid image href %(image_href)s.") class ListingImageRefsNotSupported(Invalid): message = _("Some images have been stored via hrefs." 
+ " This version of the api does not support displaying image hrefs.") class ImageNotFound(NotFound): message = _("Image %(image_id)s could not be found.") class KernelNotFoundForImage(ImageNotFound): message = _("Kernel not found for image %(image_id)s.") class UserNotFound(NotFound): message = _("User %(user_id)s could not be found.") class ProjectNotFound(NotFound): message = _("Project %(project_id)s could not be found.") class ProjectMembershipNotFound(NotFound): message = _("User %(user_id)s is not a member of project %(project_id)s.") class UserRoleNotFound(NotFound): message = _("Role %(role_id)s could not be found.") class StorageRepositoryNotFound(NotFound): message = _("Cannot find SR to read/write VDI.") class NetworkInUse(NovaException): message = _("Network %(network_id)s is still in use.") class NetworkNotCreated(NovaException): message = _("%(req)s is required to create a network.") class NetworkNotFound(NotFound): message = _("Network %(network_id)s could not be found.") class NetworkNotFoundForBridge(NetworkNotFound): message = _("Network could not be found for bridge %(bridge)s") class NetworkNotFoundForUUID(NetworkNotFound): message = _("Network could not be found for uuid %(uuid)s") class NetworkNotFoundForCidr(NetworkNotFound): message = _("Network could not be found with cidr %(cidr)s.") class NetworkNotFoundForInstance(NetworkNotFound): message = _("Network could not be found for instance %(instance_id)s.") class NoNetworksFound(NotFound): message = _("No networks defined.") class NetworkNotFoundForProject(NotFound): message = _("Either Network uuid %(network_uuid)s is not present or " "is not assigned to the project %(project_id)s.") class NetworkHostNotSet(NovaException): message = _("Host is not set to the network (%(network_id)s).") class NetworkBusy(NovaException): message = _("Network %(network)s has active ports, cannot delete.") class DatastoreNotFound(NotFound): message = _("Could not find the datastore reference(s) which the VM uses.") class FixedIpNotFound(NotFound): message = _("No fixed IP associated with id %(id)s.") class FixedIpNotFoundForAddress(FixedIpNotFound): message = _("Fixed ip not found for address %(address)s.") class FixedIpNotFoundForInstance(FixedIpNotFound): message = _("Instance %(instance_id)s has zero fixed ips.") class FixedIpNotFoundForNetworkHost(FixedIpNotFound): message = _("Network host %(host)s has zero fixed ips " "in network %(network_id)s.") class FixedIpNotFoundForSpecificInstance(FixedIpNotFound): message = _("Instance %(instance_id)s doesn't have fixed ip '%(ip)s'.") class FixedIpNotFoundForHost(FixedIpNotFound): message = _("Host %(host)s has zero fixed ips.") class FixedIpNotFoundForNetwork(FixedIpNotFound): message = _("Fixed IP address (%(address)s) does not exist in " "network (%(network_uuid)s).") class FixedIpAlreadyInUse(NovaException): message = _("Fixed IP address %(address)s is already in use.") class FixedIpInvalid(Invalid): message = _("Fixed IP address %(address)s is invalid.") class NoMoreFixedIps(NovaException): message = _("Zero fixed ips available.") class NoFixedIpsDefined(NotFound): message = _("Zero fixed ips could be found.") class FloatingIpNotFound(NotFound): message = _("Floating ip not found for id %(id)s.") class FloatingIpDNSExists(Invalid): message = _("The DNS entry %(name)s already exists in domain %(domain)s.") class FloatingIpNotFoundForAddress(FloatingIpNotFound): message = _("Floating ip not found for address %(address)s.") class FloatingIpNotFoundForHost(FloatingIpNotFound): message = 
_("Floating ip not found for host %(host)s.") class NoMoreFloatingIps(FloatingIpNotFound): message = _("Zero floating ips available.") class FloatingIpAssociated(NovaException): message = _("Floating ip %(address)s is associated.") class FloatingIpNotAssociated(NovaException): message = _("Floating ip %(address)s is not associated.") class NoFloatingIpsDefined(NotFound): message = _("Zero floating ips exist.") class NoFloatingIpInterface(NotFound): message = _("Interface %(interface)s not found.") class KeypairNotFound(NotFound): message = _("Keypair %(name)s not found for user %(user_id)s") class CertificateNotFound(NotFound): message = _("Certificate %(certificate_id)s not found.") class ServiceNotFound(NotFound): message = _("Service %(service_id)s could not be found.") class HostNotFound(NotFound): message = _("Host %(host)s could not be found.") class ComputeHostNotFound(HostNotFound): message = _("Compute host %(host)s could not be found.") class HostBinaryNotFound(NotFound): message = _("Could not find binary %(binary)s on host %(host)s.") class AuthTokenNotFound(NotFound): message = _("Auth token %(token)s could not be found.") class AccessKeyNotFound(NotFound): message = _("Access Key %(access_key)s could not be found.") class QuotaNotFound(NotFound): message = _("Quota could not be found") class ProjectQuotaNotFound(QuotaNotFound): message = _("Quota for project %(project_id)s could not be found.") class SecurityGroupNotFound(NotFound): message = _("Security group %(security_group_id)s not found.") class SecurityGroupNotFoundForProject(SecurityGroupNotFound): message = _("Security group %(security_group_id)s not found " "for project %(project_id)s.") class SecurityGroupNotFoundForRule(SecurityGroupNotFound): message = _("Security group with rule %(rule_id)s not found.") class SecurityGroupExistsForInstance(Invalid): message = _("Security group %(security_group_id)s is already associated" " with the instance %(instance_id)s") class SecurityGroupNotExistsForInstance(Invalid): message = _("Security group %(security_group_id)s is not associated with" " the instance %(instance_id)s") class MigrationNotFound(NotFound): message = _("Migration %(migration_id)s could not be found.") class MigrationNotFoundByStatus(MigrationNotFound): message = _("Migration not found for instance %(instance_id)s " "with status %(status)s.") class ConsolePoolNotFound(NotFound): message = _("Console pool %(pool_id)s could not be found.") class ConsolePoolNotFoundForHostType(NotFound): message = _("Console pool of type %(console_type)s " "for compute host %(compute_host)s " "on proxy host %(host)s not found.") class ConsoleNotFound(NotFound): message = _("Console %(console_id)s could not be found.") class ConsoleNotFoundForInstance(ConsoleNotFound): message = _("Console for instance %(instance_id)s could not be found.") class ConsoleNotFoundInPoolForInstance(ConsoleNotFound): message = _("Console for instance %(instance_id)s " "in pool %(pool_id)s could not be found.") class ConsoleTypeInvalid(Invalid): message = _("Invalid console type %(console_type)s ") class NoInstanceTypesFound(NotFound): message = _("Zero instance types found.") class InstanceTypeNotFound(NotFound): message = _("Instance type %(instance_type_id)s could not be found.") class InstanceTypeNotFoundByName(InstanceTypeNotFound): message = _("Instance type with name %(instance_type_name)s " "could not be found.") class FlavorNotFound(NotFound): message = _("Flavor %(flavor_id)s could not be found.") class CellNotFound(NotFound): message = 
_("Cell %(cell_id)s could not be found.") class SchedulerHostFilterNotFound(NotFound): message = _("Scheduler Host Filter %(filter_name)s could not be found.") class SchedulerCostFunctionNotFound(NotFound): message = _("Scheduler cost function %(cost_fn_str)s could" " not be found.") class SchedulerWeightFlagNotFound(NotFound): message = _("Scheduler weight flag not found: %(flag_name)s") class InstanceMetadataNotFound(NotFound): message = _("Instance %(instance_id)s has no metadata with " "key %(metadata_key)s.") class InstanceTypeExtraSpecsNotFound(NotFound): message = _("Instance Type %(instance_type_id)s has no extra specs with " "key %(extra_specs_key)s.") class LDAPObjectNotFound(NotFound): message = _("LDAP object could not be found") class LDAPUserNotFound(LDAPObjectNotFound): message = _("LDAP user %(user_id)s could not be found.") class LDAPGroupNotFound(LDAPObjectNotFound): message = _("LDAP group %(group_id)s could not be found.") class LDAPGroupMembershipNotFound(NotFound): message = _("LDAP user %(user_id)s is not a member of group %(group_id)s.") class FileNotFound(NotFound): message = _("File %(file_path)s could not be found.") class NoFilesFound(NotFound): message = _("Zero files could be found.") class SwitchNotFoundForNetworkAdapter(NotFound): message = _("Virtual switch associated with the " "network adapter %(adapter)s not found.") class NetworkAdapterNotFound(NotFound): message = _("Network adapter %(adapter)s could not be found.") class ClassNotFound(NotFound): message = _("Class %(class_name)s could not be found: %(exception)s") class NotAllowed(NovaException): message = _("Action not allowed.") class GlobalRoleNotAllowed(NotAllowed): message = _("Unable to use global role %(role_id)s") class ImageRotationNotAllowed(NovaException): message = _("Rotation is not allowed for snapshots") class RotationRequiredForBackup(NovaException): message = _("Rotation param is required for backup image_type") #TODO(bcwaldon): EOL this exception! 
class Duplicate(NovaException): pass class KeyPairExists(Duplicate): message = _("Key pair %(key_name)s already exists.") class UserExists(Duplicate): message = _("User %(user)s already exists.") class LDAPUserExists(UserExists): message = _("LDAP user %(user)s already exists.") class LDAPGroupExists(Duplicate): message = _("LDAP group %(group)s already exists.") class LDAPMembershipExists(Duplicate): message = _("User %(uid)s is already a member of " "the group %(group_dn)s") class ProjectExists(Duplicate): message = _("Project %(project)s already exists.") class InstanceExists(Duplicate): message = _("Instance %(name)s already exists.") class InstanceTypeExists(Duplicate): message = _("Instance Type %(name)s already exists.") class VolumeTypeExists(Duplicate): message = _("Volume Type %(name)s already exists.") class InvalidSharedStorage(NovaException): message = _("%(path)s is on shared storage: %(reason)s") class MigrationError(NovaException): message = _("Migration error") + ": %(reason)s" class MalformedRequestBody(NovaException): message = _("Malformed message body: %(reason)s") class ConfigNotFound(NotFound): message = _("Could not find config at %(path)s") class PasteAppNotFound(NotFound): message = _("Could not load paste app '%(name)s' from %(path)s") class CannotResizeToSameSize(NovaException): message = _("When resizing, instances must change size!") class ImageTooLarge(NovaException): message = _("Image is larger than instance type allows") class ZoneRequestError(NovaException): message = _("1 or more Zones could not complete the request") class InstanceTypeMemoryTooSmall(NovaException): message = _("Instance type's memory is too small for requested image.") class InstanceTypeDiskTooSmall(NovaException): message = _("Instance type's disk is too small for requested image.") class InsufficientFreeMemory(NovaException): message = _("Insufficient free memory on compute node to start %(uuid)s.") class CouldNotFetchMetrics(NovaException): message = _("Could not fetch bandwidth/cpu/disk metrics for this host.") class NoValidHost(NovaException): message = _("No valid host was found. 
%(reason)s") class WillNotSchedule(NovaException): message = _("Host %(host)s is not up or doesn't exist.") class QuotaError(NovaException): message = _("Quota exceeded") + ": code=%(code)s" code = 413 headers = {'Retry-After': 0} safe = True class AggregateError(NovaException): message = _("Aggregate %(aggregate_id)s: action '%(action)s' " "caused an error: %(reason)s.") class AggregateNotFound(NotFound): message = _("Aggregate %(aggregate_id)s could not be found.") class AggregateNameExists(Duplicate): message = _("Aggregate %(aggregate_name)s already exists.") class AggregateHostNotFound(NotFound): message = _("Aggregate %(aggregate_id)s has no host %(host)s.") class AggregateMetadataNotFound(NotFound): message = _("Aggregate %(aggregate_id)s has no metadata with " "key %(metadata_key)s.") class AggregateHostConflict(Duplicate): message = _("Host %(host)s already member of another aggregate.") class AggregateHostExists(Duplicate): message = _("Aggregate %(aggregate_id)s already has host %(host)s.") class DuplicateSfVolumeNames(Duplicate): message = _("Detected more than one volume with name %(vol_name)s") class VolumeTypeCreateFailed(NovaException): message = _("Cannot create volume_type with " "name %(name)s and specs %(extra_specs)s") class InstanceTypeCreateFailed(NovaException): message = _("Unable to create instance type") class SolidFireAPIException(NovaException): message = _("Bad response from SolidFire API") class SolidFireAPIStatusException(SolidFireAPIException): message = _("Error in SolidFire API response: status=%(status)s") class SolidFireAPIDataException(SolidFireAPIException): message = _("Error in SolidFire API response: data=%(data)s") class DuplicateVlan(Duplicate): message = _("Detected existing vlan with id %(vlan)d") class InstanceNotFound(NotFound): message = _("Instance %(instance_id)s could not be found.") class InvalidInstanceIDMalformed(Invalid): message = _("Invalid id: %(val)s (expecting \"i-...\").") class CouldNotFetchImage(NovaException): message = _("Could not fetch image %(image)s")
./CrossVul/dataset_final_sorted/CWE-264/py/bad_5538_1
crossvul-python_data_good_3692_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Main entry point into the Identity service.""" import uuid import urllib import urlparse from keystone import config from keystone import exception from keystone import policy from keystone import token from keystone.common import logging from keystone.common import manager from keystone.common import wsgi CONF = config.CONF LOG = logging.getLogger(__name__) class Manager(manager.Manager): """Default pivot point for the Identity backend. See :mod:`keystone.common.manager.Manager` for more details on how this dynamically calls the backend. """ def __init__(self): super(Manager, self).__init__(CONF.identity.driver) class Driver(object): """Interface description for an Identity driver.""" def authenticate(self, user_id=None, tenant_id=None, password=None): """Authenticate a given user, tenant and password. Returns: (user, tenant, metadata). """ raise exception.NotImplemented() def get_tenant(self, tenant_id): """Get a tenant by id. Returns: tenant_ref or None. """ raise exception.NotImplemented() def get_tenant_by_name(self, tenant_name): """Get a tenant by name. Returns: tenant_ref or None. """ raise exception.NotImplemented() def get_user(self, user_id): """Get a user by id. Returns: user_ref or None. """ raise exception.NotImplemented() def get_user_by_name(self, user_name): """Get a user by name. Returns: user_ref or None. """ raise exception.NotImplemented() def get_role(self, role_id): """Get a role by id. Returns: role_ref or None. """ raise exception.NotImplemented() def list_users(self): """List all users in the system. NOTE(termie): I'd prefer if this listed only the users for a given tenant. Returns: a list of user_refs or an empty list. """ raise exception.NotImplemented() def list_roles(self): """List all roles in the system. Returns: a list of role_refs or an empty list. """ raise exception.NotImplemented() # NOTE(termie): seven calls below should probably be exposed by the api # more clearly when the api redesign happens def add_user_to_tenant(self, tenant_id, user_id): raise exception.NotImplemented() def remove_user_from_tenant(self, tenant_id, user_id): raise exception.NotImplemented() def get_all_tenants(self): raise exception.NotImplemented() def get_tenants_for_user(self, user_id): """Get the tenants associated with a given user. Returns: a list of tenant ids. """ raise exception.NotImplemented() def get_roles_for_user_and_tenant(self, user_id, tenant_id): """Get the roles associated with a user within given tenant. Returns: a list of role ids. 
""" raise exception.NotImplemented() def add_role_to_user_and_tenant(self, user_id, tenant_id, role_id): """Add a role to a user within given tenant.""" raise exception.NotImplemented() def remove_role_from_user_and_tenant(self, user_id, tenant_id, role_id): """Remove a role from a user within given tenant.""" raise exception.NotImplemented() # user crud def create_user(self, user_id, user): raise exception.NotImplemented() def update_user(self, user_id, user): raise exception.NotImplemented() def delete_user(self, user_id): raise exception.NotImplemented() # tenant crud def create_tenant(self, tenant_id, tenant): raise exception.NotImplemented() def update_tenant(self, tenant_id, tenant): raise exception.NotImplemented() def delete_tenant(self, tenant_id, tenant): raise exception.NotImplemented() # metadata crud def get_metadata(self, user_id, tenant_id): raise exception.NotImplemented() def create_metadata(self, user_id, tenant_id, metadata): raise exception.NotImplemented() def update_metadata(self, user_id, tenant_id, metadata): raise exception.NotImplemented() def delete_metadata(self, user_id, tenant_id, metadata): raise exception.NotImplemented() # role crud def create_role(self, role_id, role): raise exception.NotImplemented() def update_role(self, role_id, role): raise exception.NotImplemented() def delete_role(self, role_id): raise exception.NotImplemented() class PublicRouter(wsgi.ComposableRouter): def add_routes(self, mapper): tenant_controller = TenantController() mapper.connect('/tenants', controller=tenant_controller, action='get_tenants_for_token', conditions=dict(methods=['GET'])) class AdminRouter(wsgi.ComposableRouter): def add_routes(self, mapper): # Tenant Operations tenant_controller = TenantController() mapper.connect('/tenants', controller=tenant_controller, action='get_all_tenants', conditions=dict(method=['GET'])) mapper.connect('/tenants/{tenant_id}', controller=tenant_controller, action='get_tenant', conditions=dict(method=['GET'])) # User Operations user_controller = UserController() mapper.connect('/users/{user_id}', controller=user_controller, action='get_user', conditions=dict(method=['GET'])) # Role Operations roles_controller = RoleController() mapper.connect('/tenants/{tenant_id}/users/{user_id}/roles', controller=roles_controller, action='get_user_roles', conditions=dict(method=['GET'])) mapper.connect('/users/{user_id}/roles', controller=user_controller, action='get_user_roles', conditions=dict(method=['GET'])) class TenantController(wsgi.Application): def __init__(self): self.identity_api = Manager() self.policy_api = policy.Manager() self.token_api = token.Manager() super(TenantController, self).__init__() def get_all_tenants(self, context, **kw): """Gets a list of all tenants for an admin user.""" self.assert_admin(context) tenant_refs = self.identity_api.get_tenants(context) params = { 'limit': context['query_string'].get('limit'), 'marker': context['query_string'].get('marker'), } return self._format_tenant_list(tenant_refs, **params) def get_tenants_for_token(self, context, **kw): """Get valid tenants for token based on token used to authenticate. Pulls the token from the context, validates it and gets the valid tenants for the user in the token. Doesn't care about token scopedness. 
""" try: token_ref = self.token_api.get_token(context=context, token_id=context['token_id']) except exception.NotFound: raise exception.Unauthorized() user_ref = token_ref['user'] tenant_ids = self.identity_api.get_tenants_for_user( context, user_ref['id']) tenant_refs = [] for tenant_id in tenant_ids: tenant_refs.append(self.identity_api.get_tenant( context=context, tenant_id=tenant_id)) params = { 'limit': context['query_string'].get('limit'), 'marker': context['query_string'].get('marker'), } return self._format_tenant_list(tenant_refs, **params) def get_tenant(self, context, tenant_id): # TODO(termie): this stuff should probably be moved to middleware self.assert_admin(context) tenant = self.identity_api.get_tenant(context, tenant_id) if tenant is None: raise exception.TenantNotFound(tenant_id=tenant_id) return {'tenant': tenant} # CRUD Extension def create_tenant(self, context, tenant): tenant_ref = self._normalize_dict(tenant) self.assert_admin(context) tenant_id = (tenant_ref.get('id') and tenant_ref.get('id') or uuid.uuid4().hex) tenant_ref['id'] = tenant_id tenant = self.identity_api.create_tenant( context, tenant_id, tenant_ref) return {'tenant': tenant} def update_tenant(self, context, tenant_id, tenant): self.assert_admin(context) if self.identity_api.get_tenant(context, tenant_id) is None: raise exception.TenantNotFound(tenant_id=tenant_id) tenant_ref = self.identity_api.update_tenant( context, tenant_id, tenant) return {'tenant': tenant_ref} def delete_tenant(self, context, tenant_id, **kw): self.assert_admin(context) if self.identity_api.get_tenant(context, tenant_id) is None: raise exception.TenantNotFound(tenant_id=tenant_id) self.identity_api.delete_tenant(context, tenant_id) def get_tenant_users(self, context, tenant_id, **kw): self.assert_admin(context) if self.identity_api.get_tenant(context, tenant_id) is None: raise exception.TenantNotFound(tenant_id=tenant_id) user_refs = self.identity_api.get_tenant_users(context, tenant_id) return {'users': user_refs} def _format_tenant_list(self, tenant_refs, **kwargs): marker = kwargs.get('marker') page_idx = 0 if marker is not None: for (marker_idx, tenant) in enumerate(tenant_refs): if tenant['id'] == marker: # we start pagination after the marker page_idx = marker_idx + 1 break else: msg = 'Marker could not be found' raise exception.ValidationError(message=msg) limit = kwargs.get('limit') if limit is not None: try: limit = int(limit) if limit < 0: raise AssertionError() except (ValueError, AssertionError): msg = 'Invalid limit value' raise exception.ValidationError(message=msg) tenant_refs = tenant_refs[page_idx:limit] for x in tenant_refs: if 'enabled' not in x: x['enabled'] = True o = {'tenants': tenant_refs, 'tenants_links': []} return o class UserController(wsgi.Application): def __init__(self): self.identity_api = Manager() self.policy_api = policy.Manager() self.token_api = token.Manager() super(UserController, self).__init__() def get_user(self, context, user_id): self.assert_admin(context) user_ref = self.identity_api.get_user(context, user_id) if not user_ref: raise exception.UserNotFound(user_id=user_id) return {'user': user_ref} def get_users(self, context): # NOTE(termie): i can't imagine that this really wants all the data # about every single user in the system... 
self.assert_admin(context) user_refs = self.identity_api.list_users(context) return {'users': user_refs} # CRUD extension def create_user(self, context, user): user = self._normalize_dict(user) self.assert_admin(context) tenant_id = user.get('tenantId', None) if (tenant_id is not None and self.identity_api.get_tenant(context, tenant_id) is None): raise exception.TenantNotFound(tenant_id=tenant_id) user_id = uuid.uuid4().hex user_ref = user.copy() user_ref['id'] = user_id new_user_ref = self.identity_api.create_user( context, user_id, user_ref) if tenant_id: self.identity_api.add_user_to_tenant(context, tenant_id, user_id) return {'user': new_user_ref} def update_user(self, context, user_id, user): # NOTE(termie): this is really more of a patch than a put self.assert_admin(context) if self.identity_api.get_user(context, user_id) is None: raise exception.UserNotFound(user_id=user_id) user_ref = self.identity_api.update_user(context, user_id, user) # If the password was changed or the user was disabled we clear tokens if user.get('password') or user.get('enabled', True) == False: try: for token_id in self.token_api.list_tokens(context, user_id): self.token_api.delete_token(context, token_id) except exception.NotImplemented: # The users status has been changed but tokens remain valid for # backends that can't list tokens for users LOG.warning('User %s status has changed, but existing tokens ' 'remain valid' % user_id) return {'user': user_ref} def delete_user(self, context, user_id): self.assert_admin(context) if self.identity_api.get_user(context, user_id) is None: raise exception.UserNotFound(user_id=user_id) self.identity_api.delete_user(context, user_id) def set_user_enabled(self, context, user_id, user): return self.update_user(context, user_id, user) def set_user_password(self, context, user_id, user): return self.update_user(context, user_id, user) def update_user_tenant(self, context, user_id, user): """Update the default tenant.""" # ensure that we're a member of that tenant tenant_id = user.get('tenantId') self.identity_api.add_user_to_tenant(context, tenant_id, user_id) return self.update_user(context, user_id, user) class RoleController(wsgi.Application): def __init__(self): self.identity_api = Manager() self.token_api = token.Manager() self.policy_api = policy.Manager() super(RoleController, self).__init__() # COMPAT(essex-3) def get_user_roles(self, context, user_id, tenant_id=None): """Get the roles for a user and tenant pair. Since we're trying to ignore the idea of user-only roles we're not implementing them in hopes that the idea will die off. 
""" if tenant_id is None: raise exception.NotImplemented(message='User roles not supported: ' 'tenant ID required') user = self.identity_api.get_user(context, user_id) if user is None: raise exception.UserNotFound(user_id=user_id) tenant = self.identity_api.get_tenant(context, tenant_id) if tenant is None: raise exception.TenantNotFound(tenant_id=tenant_id) roles = self.identity_api.get_roles_for_user_and_tenant( context, user_id, tenant_id) return {'roles': [self.identity_api.get_role(context, x) for x in roles]} # CRUD extension def get_role(self, context, role_id): self.assert_admin(context) role_ref = self.identity_api.get_role(context, role_id) if not role_ref: raise exception.RoleNotFound(role_id=role_id) return {'role': role_ref} def create_role(self, context, role): role = self._normalize_dict(role) self.assert_admin(context) role_id = uuid.uuid4().hex role['id'] = role_id role_ref = self.identity_api.create_role(context, role_id, role) return {'role': role_ref} def delete_role(self, context, role_id): self.assert_admin(context) self.get_role(context, role_id) self.identity_api.delete_role(context, role_id) def get_roles(self, context): self.assert_admin(context) roles = self.identity_api.list_roles(context) # TODO(termie): probably inefficient at some point return {'roles': roles} def add_role_to_user(self, context, user_id, role_id, tenant_id=None): """Add a role to a user and tenant pair. Since we're trying to ignore the idea of user-only roles we're not implementing them in hopes that the idea will die off. """ self.assert_admin(context) if tenant_id is None: raise exception.NotImplemented(message='User roles not supported: ' 'tenant_id required') if self.identity_api.get_user(context, user_id) is None: raise exception.UserNotFound(user_id=user_id) if self.identity_api.get_tenant(context, tenant_id) is None: raise exception.TenantNotFound(tenant_id=tenant_id) if self.identity_api.get_role(context, role_id) is None: raise exception.RoleNotFound(role_id=role_id) # This still has the weird legacy semantics that adding a role to # a user also adds them to a tenant self.identity_api.add_user_to_tenant(context, tenant_id, user_id) self.identity_api.add_role_to_user_and_tenant( context, user_id, tenant_id, role_id) role_ref = self.identity_api.get_role(context, role_id) return {'role': role_ref} def remove_role_from_user(self, context, user_id, role_id, tenant_id=None): """Remove a role from a user and tenant pair. Since we're trying to ignore the idea of user-only roles we're not implementing them in hopes that the idea will die off. 
""" self.assert_admin(context) if tenant_id is None: raise exception.NotImplemented(message='User roles not supported: ' 'tenant_id required') if self.identity_api.get_user(context, user_id) is None: raise exception.UserNotFound(user_id=user_id) if self.identity_api.get_tenant(context, tenant_id) is None: raise exception.TenantNotFound(tenant_id=tenant_id) if self.identity_api.get_role(context, role_id) is None: raise exception.RoleNotFound(role_id=role_id) # This still has the weird legacy semantics that adding a role to # a user also adds them to a tenant, so we must follow up on that self.identity_api.remove_role_from_user_and_tenant( context, user_id, tenant_id, role_id) roles = self.identity_api.get_roles_for_user_and_tenant( context, user_id, tenant_id) if not roles: self.identity_api.remove_user_from_tenant( context, tenant_id, user_id) return # COMPAT(diablo): CRUD extension def get_role_refs(self, context, user_id): """Ultimate hack to get around having to make role_refs first-class. This will basically iterate over the various roles the user has in all tenants the user is a member of and create fake role_refs where the id encodes the user-tenant-role information so we can look up the appropriate data when we need to delete them. """ self.assert_admin(context) user_ref = self.identity_api.get_user(context, user_id) tenant_ids = self.identity_api.get_tenants_for_user(context, user_id) o = [] for tenant_id in tenant_ids: role_ids = self.identity_api.get_roles_for_user_and_tenant( context, user_id, tenant_id) for role_id in role_ids: ref = {'roleId': role_id, 'tenantId': tenant_id, 'userId': user_id} ref['id'] = urllib.urlencode(ref) o.append(ref) return {'roles': o} # COMPAT(diablo): CRUD extension def create_role_ref(self, context, user_id, role): """This is actually used for adding a user to a tenant. In the legacy data model adding a user to a tenant required setting a role. """ self.assert_admin(context) # TODO(termie): for now we're ignoring the actual role tenant_id = role.get('tenantId') role_id = role.get('roleId') self.identity_api.add_user_to_tenant(context, tenant_id, user_id) self.identity_api.add_role_to_user_and_tenant( context, user_id, tenant_id, role_id) role_ref = self.identity_api.get_role(context, role_id) return {'role': role_ref} # COMPAT(diablo): CRUD extension def delete_role_ref(self, context, user_id, role_ref_id): """This is actually used for deleting a user from a tenant. In the legacy data model removing a user from a tenant required deleting a role. To emulate this, we encode the tenant and role in the role_ref_id, and if this happens to be the last role for the user-tenant pair, we remove the user from the tenant. """ self.assert_admin(context) # TODO(termie): for now we're ignoring the actual role role_ref_ref = urlparse.parse_qs(role_ref_id) tenant_id = role_ref_ref.get('tenantId')[0] role_id = role_ref_ref.get('roleId')[0] self.identity_api.remove_role_from_user_and_tenant( context, user_id, tenant_id, role_id) roles = self.identity_api.get_roles_for_user_and_tenant( context, user_id, tenant_id) if not roles: self.identity_api.remove_user_from_tenant( context, tenant_id, user_id)
./CrossVul/dataset_final_sorted/CWE-264/py/good_3692_0
crossvul-python_data_bad_5539_1
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Nova base exception handling. Includes decorator for re-raising Nova-type exceptions. SHOULD include dedicated exception logging. """ import functools import itertools import webob.exc from nova.openstack.common import excutils from nova.openstack.common import log as logging LOG = logging.getLogger(__name__) class ConvertedException(webob.exc.WSGIHTTPException): def __init__(self, code=0, title="", explanation=""): self.code = code self.title = title self.explanation = explanation super(ConvertedException, self).__init__() class ProcessExecutionError(IOError): def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None, description=None): self.exit_code = exit_code self.stderr = stderr self.stdout = stdout self.cmd = cmd self.description = description if description is None: description = _('Unexpected error while running command.') if exit_code is None: exit_code = '-' message = _('%(description)s\nCommand: %(cmd)s\n' 'Exit code: %(exit_code)s\nStdout: %(stdout)r\n' 'Stderr: %(stderr)r') % locals() IOError.__init__(self, message) def wrap_db_error(f): def _wrap(*args, **kwargs): try: return f(*args, **kwargs) except UnicodeEncodeError: raise InvalidUnicodeParameter() except Exception, e: LOG.exception(_('DB exception wrapped.')) raise DBError(e) _wrap.func_name = f.func_name return _wrap def wrap_exception(notifier=None, publisher_id=None, event_type=None, level=None): """This decorator wraps a method to catch any exceptions that may get thrown. It logs the exception as well as optionally sending it to the notification system. """ # TODO(sandy): Find a way to import nova.notifier.api so we don't have # to pass it in as a parameter. Otherwise we get a cyclic import of # nova.notifier.api -> nova.utils -> nova.exception :( # TODO(johannes): Also, it would be nice to use # utils.save_and_reraise_exception() without an import loop def inner(f): def wrapped(*args, **kw): try: return f(*args, **kw) except Exception, e: with excutils.save_and_reraise_exception(): if notifier: payload = dict(args=args, exception=e) payload.update(kw) # Use a temp vars so we don't shadow # our outer definitions. temp_level = level if not temp_level: temp_level = notifier.ERROR temp_type = event_type if not temp_type: # If f has multiple decorators, they must use # functools.wraps to ensure the name is # propagated. temp_type = f.__name__ context = get_context_from_function_and_args(f, args, kw) notifier.notify(context, publisher_id, temp_type, temp_level, payload) return functools.wraps(f)(wrapped) return inner class NovaException(Exception): """Base Nova Exception To correctly use this class, inherit from it and define a 'message' property. That message will get printf'd with the keyword arguments provided to the constructor. 
""" message = _("An unknown exception occurred.") code = 500 headers = {} safe = False def __init__(self, message=None, **kwargs): self.kwargs = kwargs if 'code' not in self.kwargs: try: self.kwargs['code'] = self.code except AttributeError: pass if not message: try: message = self.message % kwargs except Exception as e: # kwargs doesn't match a variable in the message # log the issue and the kwargs LOG.exception(_('Exception in string format operation')) for name, value in kwargs.iteritems(): LOG.error("%s: %s" % (name, value)) # at least get the core message out if something happened message = self.message super(NovaException, self).__init__(message) class EC2APIError(NovaException): message = _("Unknown") def __init__(self, message=None, code=None): self.msg = message self.code = code outstr = '%s' % message super(EC2APIError, self).__init__(outstr) class DBError(NovaException): """Wraps an implementation specific exception.""" def __init__(self, inner_exception=None): self.inner_exception = inner_exception super(DBError, self).__init__(str(inner_exception)) class DeprecatedConfig(NovaException): message = _("Fatal call to deprecated config %(msg)s") class DecryptionFailure(NovaException): message = _("Failed to decrypt text") class VirtualInterfaceCreateException(NovaException): message = _("Virtual Interface creation failed") class VirtualInterfaceMacAddressException(NovaException): message = _("5 attempts to create virtual interface" "with unique mac address failed") class GlanceConnectionFailed(NovaException): message = _("Connection to glance host %(host)s:%(port)s failed: " "%(reason)s") class NotAuthorized(NovaException): message = _("Not authorized.") code = 403 class AdminRequired(NotAuthorized): message = _("User does not have admin privileges") class PolicyNotAuthorized(NotAuthorized): message = _("Policy doesn't allow %(action)s to be performed.") class ImageNotAuthorized(NovaException): message = _("Not authorized for image %(image_id)s.") class Invalid(NovaException): message = _("Unacceptable parameters.") code = 400 class InvalidSnapshot(Invalid): message = _("Invalid snapshot") + ": %(reason)s" class VolumeUnattached(Invalid): message = _("Volume %(volume_id)s is not attached to anything") class VolumeAttached(Invalid): message = _("Volume %(volume_id)s is still attached, detach volume first.") class InvalidKeypair(Invalid): message = _("Keypair data is invalid") class SfJsonEncodeFailure(NovaException): message = _("Failed to load data into json format") class InvalidRequest(Invalid): message = _("The request is invalid.") class InvalidInput(Invalid): message = _("Invalid input received") + ": %(reason)s" class InvalidVolumeType(Invalid): message = _("Invalid volume type") + ": %(reason)s" class InvalidVolume(Invalid): message = _("Invalid volume") + ": %(reason)s" class InvalidMetadata(Invalid): message = _("Invalid metadata") + ": %(reason)s" class InvalidMetadataSize(Invalid): message = _("Invalid metadata size") + ": %(reason)s" class InvalidPortRange(Invalid): message = _("Invalid port range %(from_port)s:%(to_port)s. %(msg)s") class InvalidIpProtocol(Invalid): message = _("Invalid IP protocol %(protocol)s.") class InvalidContentType(Invalid): message = _("Invalid content type %(content_type)s.") class InvalidCidr(Invalid): message = _("Invalid cidr %(cidr)s.") class InvalidUnicodeParameter(Invalid): message = _("Invalid Parameter: " "Unicode is not supported by the current database.") # Cannot be templated as the error syntax varies. 
# msg needs to be constructed when raised. class InvalidParameterValue(Invalid): message = _("%(err)s") class InvalidAggregateAction(Invalid): message = _("Cannot perform action '%(action)s' on aggregate " "%(aggregate_id)s. Reason: %(reason)s.") class InvalidGroup(Invalid): message = _("Group not valid. Reason: %(reason)s") class InvalidSortKey(Invalid): message = _("Sort key supplied was not valid.") class InstanceInvalidState(Invalid): message = _("Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot " "%(method)s while the instance is in this state.") class InstanceNotRunning(Invalid): message = _("Instance %(instance_id)s is not running.") class InstanceNotInRescueMode(Invalid): message = _("Instance %(instance_id)s is not in rescue mode") class InstanceNotReady(Invalid): message = _("Instance %(instance_id)s is not ready") class InstanceSuspendFailure(Invalid): message = _("Failed to suspend instance") + ": %(reason)s" class InstanceResumeFailure(Invalid): message = _("Failed to resume server") + ": %(reason)s." class InstanceRebootFailure(Invalid): message = _("Failed to reboot instance") + ": %(reason)s" class InstanceTerminationFailure(Invalid): message = _("Failed to terminate instance") + ": %(reason)s" class ServiceUnavailable(Invalid): message = _("Service is unavailable at this time.") class ComputeResourcesUnavailable(ServiceUnavailable): message = _("Insufficient compute resources.") class ComputeServiceUnavailable(ServiceUnavailable): message = _("Compute service is unavailable at this time.") class UnableToMigrateToSelf(Invalid): message = _("Unable to migrate instance (%(instance_id)s) " "to current host (%(host)s).") class InvalidHypervisorType(Invalid): message = _("The supplied hypervisor type of is invalid.") class DestinationHypervisorTooOld(Invalid): message = _("The instance requires a newer hypervisor version than " "has been provided.") class DestinationDiskExists(Invalid): message = _("The supplied disk path (%(path)s) already exists, " "it is expected not to exist.") class InvalidDevicePath(Invalid): message = _("The supplied device path (%(path)s) is invalid.") class DevicePathInUse(Invalid): message = _("The supplied device path (%(path)s) is in use.") class DeviceIsBusy(Invalid): message = _("The supplied device (%(device)s) is busy.") class InvalidCPUInfo(Invalid): message = _("Unacceptable CPU info") + ": %(reason)s" class InvalidIpAddressError(Invalid): message = _("%(address)s is not a valid IP v4/6 address.") class InvalidVLANTag(Invalid): message = _("VLAN tag is not appropriate for the port group " "%(bridge)s. Expected VLAN tag is %(tag)s, " "but the one associated with the port group is %(pgroup)s.") class InvalidVLANPortGroup(Invalid): message = _("vSwitch which contains the port group %(bridge)s is " "not associated with the desired physical adapter. 
" "Expected vSwitch is %(expected)s, but the one associated " "is %(actual)s.") class InvalidDiskFormat(Invalid): message = _("Disk format %(disk_format)s is not acceptable") class ImageUnacceptable(Invalid): message = _("Image %(image_id)s is unacceptable: %(reason)s") class InstanceUnacceptable(Invalid): message = _("Instance %(instance_id)s is unacceptable: %(reason)s") class InvalidEc2Id(Invalid): message = _("Ec2 id %(ec2_id)s is unacceptable.") class InvalidUUID(Invalid): message = _("Expected a uuid but received %(uuid)s.") class ConstraintNotMet(NovaException): message = _("Constraint not met.") code = 412 class NotFound(NovaException): message = _("Resource could not be found.") code = 404 class VirtDriverNotFound(NotFound): message = _("Could not find driver for connection_type %(name)s") class PersistentVolumeFileNotFound(NotFound): message = _("Volume %(volume_id)s persistence file could not be found.") class VolumeNotFound(NotFound): message = _("Volume %(volume_id)s could not be found.") class SfAccountNotFound(NotFound): message = _("Unable to locate account %(account_name)s on " "Solidfire device") class VolumeMetadataNotFound(NotFound): message = _("Volume %(volume_id)s has no metadata with " "key %(metadata_key)s.") class VolumeTypeNotFound(NotFound): message = _("Volume type %(volume_type_id)s could not be found.") class VolumeTypeNotFoundByName(VolumeTypeNotFound): message = _("Volume type with name %(volume_type_name)s " "could not be found.") class VolumeTypeExtraSpecsNotFound(NotFound): message = _("Volume Type %(volume_type_id)s has no extra specs with " "key %(extra_specs_key)s.") class SnapshotNotFound(NotFound): message = _("Snapshot %(snapshot_id)s could not be found.") class VolumeIsBusy(NovaException): message = _("deleting volume %(volume_name)s that has snapshot") class SnapshotIsBusy(NovaException): message = _("deleting snapshot %(snapshot_name)s that has " "dependent volumes") class ISCSITargetNotFoundForVolume(NotFound): message = _("No target id found for volume %(volume_id)s.") class ISCSITargetCreateFailed(NovaException): message = _("Failed to create iscsi target for volume %(volume_id)s.") class ISCSITargetRemoveFailed(NovaException): message = _("Failed to remove iscsi target for volume %(volume_id)s.") class DiskNotFound(NotFound): message = _("No disk at %(location)s") class VolumeDriverNotFound(NotFound): message = _("Could not find a handler for %(driver_type)s volume.") class InvalidImageRef(Invalid): message = _("Invalid image href %(image_href)s.") class ImageNotFound(NotFound): message = _("Image %(image_id)s could not be found.") class ImageNotFoundEC2(ImageNotFound): message = _("Image %(image_id)s could not be found. The nova EC2 API " "assigns image ids dynamically when they are listed for the " "first time. 
Have you listed image ids since adding this " "image?") class ProjectNotFound(NotFound): message = _("Project %(project_id)s could not be found.") class StorageRepositoryNotFound(NotFound): message = _("Cannot find SR to read/write VDI.") class NetworkInUse(NovaException): message = _("Network %(network_id)s is still in use.") class NetworkNotCreated(NovaException): message = _("%(req)s is required to create a network.") class NetworkNotFound(NotFound): message = _("Network %(network_id)s could not be found.") class NetworkNotFoundForBridge(NetworkNotFound): message = _("Network could not be found for bridge %(bridge)s") class NetworkNotFoundForUUID(NetworkNotFound): message = _("Network could not be found for uuid %(uuid)s") class NetworkNotFoundForCidr(NetworkNotFound): message = _("Network could not be found with cidr %(cidr)s.") class NetworkNotFoundForInstance(NetworkNotFound): message = _("Network could not be found for instance %(instance_id)s.") class NoNetworksFound(NotFound): message = _("No networks defined.") class NetworkNotFoundForProject(NotFound): message = _("Either Network uuid %(network_uuid)s is not present or " "is not assigned to the project %(project_id)s.") class NetworkHostNotSet(NovaException): message = _("Host is not set to the network (%(network_id)s).") class DatastoreNotFound(NotFound): message = _("Could not find the datastore reference(s) which the VM uses.") class PortInUse(NovaException): message = _("Port %(port_id)s is still in use.") class PortNotFound(NotFound): message = _("Port %(port_id)s could not be found.") class FixedIpNotFound(NotFound): message = _("No fixed IP associated with id %(id)s.") class FixedIpNotFoundForAddress(FixedIpNotFound): message = _("Fixed ip not found for address %(address)s.") class FixedIpNotFoundForInstance(FixedIpNotFound): message = _("Instance %(instance_uuid)s has zero fixed ips.") class FixedIpNotFoundForNetworkHost(FixedIpNotFound): message = _("Network host %(host)s has zero fixed ips " "in network %(network_id)s.") class FixedIpNotFoundForSpecificInstance(FixedIpNotFound): message = _("Instance %(instance_uuid)s doesn't have fixed ip '%(ip)s'.") class FixedIpNotFoundForNetwork(FixedIpNotFound): message = _("Fixed IP address (%(address)s) does not exist in " "network (%(network_uuid)s).") class FixedIpAlreadyInUse(NovaException): message = _("Fixed IP address %(address)s is already in use on instance " "%(instance_uuid)s.") class FixedIpAssociatedWithMultipleInstances(NovaException): message = _("More than one instance is associated with fixed ip address " "'%(address)s'.") class FixedIpInvalid(Invalid): message = _("Fixed IP address %(address)s is invalid.") class NoMoreFixedIps(NovaException): message = _("Zero fixed ips available.") class NoFixedIpsDefined(NotFound): message = _("Zero fixed ips could be found.") #TODO(bcwaldon): EOL this exception! 
class Duplicate(NovaException):
    pass


class FloatingIpExists(Duplicate):
    message = _("Floating ip %(address)s already exists.")


class FloatingIpNotFound(NotFound):
    message = _("Floating ip not found for id %(id)s.")


class FloatingIpDNSExists(Invalid):
    message = _("The DNS entry %(name)s already exists in domain %(domain)s.")


class FloatingIpNotFoundForAddress(FloatingIpNotFound):
    message = _("Floating ip not found for address %(address)s.")


class FloatingIpNotFoundForHost(FloatingIpNotFound):
    message = _("Floating ip not found for host %(host)s.")


class FloatingIpMultipleFoundForAddress(NovaException):
    message = _("Multiple floating ips are found for address %(address)s.")


class FloatingIpPoolNotFound(NotFound):
    message = _("Floating ip pool not found.")
    safe = True


class NoMoreFloatingIps(FloatingIpNotFound):
    message = _("Zero floating ips available.")
    safe = True


class FloatingIpAssociated(NovaException):
    message = _("Floating ip %(address)s is associated.")


class FloatingIpNotAssociated(NovaException):
    message = _("Floating ip %(address)s is not associated.")


class NoFloatingIpsDefined(NotFound):
    message = _("Zero floating ips exist.")


class NoFloatingIpInterface(NotFound):
    message = _("Interface %(interface)s not found.")


class CannotDisassociateAutoAssignedFloatingIP(NovaException):
    message = _("Cannot disassociate auto assigned floating ip")


class KeypairNotFound(NotFound):
    message = _("Keypair %(name)s not found for user %(user_id)s")


class CertificateNotFound(NotFound):
    message = _("Certificate %(certificate_id)s not found.")


class ServiceNotFound(NotFound):
    message = _("Service %(service_id)s could not be found.")


class HostNotFound(NotFound):
    message = _("Host %(host)s could not be found.")


class ComputeHostNotFound(HostNotFound):
    message = _("Compute host %(host)s could not be found.")


class HostBinaryNotFound(NotFound):
    message = _("Could not find binary %(binary)s on host %(host)s.")


class InvalidReservationExpiration(Invalid):
    message = _("Invalid reservation expiration %(expire)s.")


class InvalidQuotaValue(Invalid):
    message = _("Change would make usage less than 0 for the following "
                "resources: %(unders)s")


class QuotaNotFound(NotFound):
    message = _("Quota could not be found")


class QuotaResourceUnknown(QuotaNotFound):
    message = _("Unknown quota resources %(unknown)s.")


class ProjectQuotaNotFound(QuotaNotFound):
    message = _("Quota for project %(project_id)s could not be found.")


class QuotaClassNotFound(QuotaNotFound):
    message = _("Quota class %(class_name)s could not be found.")


class QuotaUsageNotFound(QuotaNotFound):
    message = _("Quota usage for project %(project_id)s could not be found.")


class ReservationNotFound(QuotaNotFound):
    message = _("Quota reservation %(uuid)s could not be found.")


class OverQuota(NovaException):
    message = _("Quota exceeded for resources: %(overs)s")


class SecurityGroupNotFound(NotFound):
    message = _("Security group %(security_group_id)s not found.")


class SecurityGroupNotFoundForProject(SecurityGroupNotFound):
    message = _("Security group %(security_group_id)s not found "
                "for project %(project_id)s.")


class SecurityGroupNotFoundForRule(SecurityGroupNotFound):
    message = _("Security group with rule %(rule_id)s not found.")


class SecurityGroupExistsForInstance(Invalid):
    message = _("Security group %(security_group_id)s is already associated"
                " with the instance %(instance_id)s")


class SecurityGroupNotExistsForInstance(Invalid):
    message = _("Security group %(security_group_id)s is not associated with"
                " the instance %(instance_id)s")


class MigrationNotFound(NotFound):
    message = _("Migration %(migration_id)s could not be found.")


class MigrationNotFoundByStatus(MigrationNotFound):
    message = _("Migration not found for instance %(instance_id)s "
                "with status %(status)s.")


class ConsolePoolNotFound(NotFound):
    message = _("Console pool %(pool_id)s could not be found.")


class ConsolePoolNotFoundForHostType(NotFound):
    message = _("Console pool of type %(console_type)s "
                "for compute host %(compute_host)s "
                "on proxy host %(host)s not found.")


class ConsoleNotFound(NotFound):
    message = _("Console %(console_id)s could not be found.")


class ConsoleNotFoundForInstance(ConsoleNotFound):
    message = _("Console for instance %(instance_uuid)s could not be found.")


class ConsoleNotFoundInPoolForInstance(ConsoleNotFound):
    message = _("Console for instance %(instance_uuid)s "
                "in pool %(pool_id)s could not be found.")


class ConsoleTypeInvalid(Invalid):
    message = _("Invalid console type %(console_type)s ")


class InstanceTypeNotFound(NotFound):
    message = _("Instance type %(instance_type_id)s could not be found.")


class InstanceTypeNotFoundByName(InstanceTypeNotFound):
    message = _("Instance type with name %(instance_type_name)s "
                "could not be found.")


class FlavorNotFound(NotFound):
    message = _("Flavor %(flavor_id)s could not be found.")


class FlavorAccessNotFound(NotFound):
    message = _("Flavor access not found for %(flavor_id)s / "
                "%(project_id)s combination.")


class SchedulerHostFilterNotFound(NotFound):
    message = _("Scheduler Host Filter %(filter_name)s could not be found.")


class SchedulerCostFunctionNotFound(NotFound):
    message = _("Scheduler cost function %(cost_fn_str)s could"
                " not be found.")


class SchedulerWeightFlagNotFound(NotFound):
    message = _("Scheduler weight flag not found: %(flag_name)s")


class InstanceMetadataNotFound(NotFound):
    message = _("Instance %(instance_uuid)s has no metadata with "
                "key %(metadata_key)s.")


class InstanceSystemMetadataNotFound(NotFound):
    message = _("Instance %(instance_uuid)s has no system metadata with "
                "key %(metadata_key)s.")


class InstanceTypeExtraSpecsNotFound(NotFound):
    message = _("Instance Type %(instance_type_id)s has no extra specs with "
                "key %(extra_specs_key)s.")


class FileNotFound(NotFound):
    message = _("File %(file_path)s could not be found.")


class NoFilesFound(NotFound):
    message = _("Zero files could be found.")


class SwitchNotFoundForNetworkAdapter(NotFound):
    message = _("Virtual switch associated with the "
                "network adapter %(adapter)s not found.")


class NetworkAdapterNotFound(NotFound):
    message = _("Network adapter %(adapter)s could not be found.")


class ClassNotFound(NotFound):
    message = _("Class %(class_name)s could not be found: %(exception)s")


class NotAllowed(NovaException):
    message = _("Action not allowed.")


class ImageRotationNotAllowed(NovaException):
    message = _("Rotation is not allowed for snapshots")


class RotationRequiredForBackup(NovaException):
    message = _("Rotation param is required for backup image_type")


class KeyPairExists(Duplicate):
    message = _("Key pair %(key_name)s already exists.")


class InstanceExists(Duplicate):
    message = _("Instance %(name)s already exists.")


class InstanceTypeExists(Duplicate):
    message = _("Instance Type %(name)s already exists.")


class FlavorAccessExists(Duplicate):
    message = _("Flavor access already exists for flavor %(flavor_id)s "
                "and project %(project_id)s combination.")


class VolumeTypeExists(Duplicate):
    message = _("Volume Type %(name)s already exists.")


class InvalidSharedStorage(NovaException):
    message = _("%(path)s is not on 
shared storage: %(reason)s") class InvalidLocalStorage(NovaException): message = _("%(path)s is not on local storage: %(reason)s") class MigrationError(NovaException): message = _("Migration error") + ": %(reason)s" class MalformedRequestBody(NovaException): message = _("Malformed message body: %(reason)s") # NOTE(johannes): NotFound should only be used when a 404 error is # appropriate to be returned class ConfigNotFound(NovaException): message = _("Could not find config at %(path)s") class PasteAppNotFound(NovaException): message = _("Could not load paste app '%(name)s' from %(path)s") class CannotResizeToSameFlavor(NovaException): message = _("When resizing, instances must change flavor!") class ImageTooLarge(NovaException): message = _("Image is larger than instance type allows") class InstanceTypeMemoryTooSmall(NovaException): message = _("Instance type's memory is too small for requested image.") class InstanceTypeDiskTooSmall(NovaException): message = _("Instance type's disk is too small for requested image.") class InsufficientFreeMemory(NovaException): message = _("Insufficient free memory on compute node to start %(uuid)s.") class CouldNotFetchMetrics(NovaException): message = _("Could not fetch bandwidth/cpu/disk metrics for this host.") class NoValidHost(NovaException): message = _("No valid host was found. %(reason)s") class WillNotSchedule(NovaException): message = _("Host %(host)s is not up or doesn't exist.") class QuotaError(NovaException): message = _("Quota exceeded") + ": code=%(code)s" code = 413 headers = {'Retry-After': 0} safe = True class TooManyInstances(QuotaError): message = _("Quota exceeded for %(overs)s: Requested %(req)s," " but already used %(used)d of %(allowed)d %(resource)s") class VolumeSizeTooLarge(QuotaError): message = _("Maximum volume size exceeded") class VolumeLimitExceeded(QuotaError): message = _("Maximum number of volumes allowed (%(allowed)d) exceeded") class FloatingIpLimitExceeded(QuotaError): message = _("Maximum number of floating ips exceeded") class MetadataLimitExceeded(QuotaError): message = _("Maximum number of metadata items exceeds %(allowed)d") class OnsetFileLimitExceeded(QuotaError): message = _("Personality file limit exceeded") class OnsetFilePathLimitExceeded(QuotaError): message = _("Personality file path too long") class OnsetFileContentLimitExceeded(QuotaError): message = _("Personality file content too long") class KeypairLimitExceeded(QuotaError): message = _("Maximum number of key pairs exceeded") class SecurityGroupLimitExceeded(QuotaError): message = _("Maximum number of security groups or rules exceeded") class AggregateError(NovaException): message = _("Aggregate %(aggregate_id)s: action '%(action)s' " "caused an error: %(reason)s.") class AggregateNotFound(NotFound): message = _("Aggregate %(aggregate_id)s could not be found.") class AggregateNameExists(Duplicate): message = _("Aggregate %(aggregate_name)s already exists.") class AggregateHostNotFound(NotFound): message = _("Aggregate %(aggregate_id)s has no host %(host)s.") class AggregateMetadataNotFound(NotFound): message = _("Aggregate %(aggregate_id)s has no metadata with " "key %(metadata_key)s.") class AggregateHostExists(Duplicate): message = _("Aggregate %(aggregate_id)s already has host %(host)s.") class DuplicateSfVolumeNames(Duplicate): message = _("Detected more than one volume with name %(vol_name)s") class VolumeTypeCreateFailed(NovaException): message = _("Cannot create volume_type with " "name %(name)s and specs %(extra_specs)s") class 
VolumeBackendAPIException(NovaException): message = _("Bad or unexpected response from the storage volume " "backend API: %(data)s") class NfsException(NovaException): message = _("Unknown NFS exception") class NfsNoSharesMounted(NotFound): message = _("No mounted NFS shares found") class NfsNoSuitableShareFound(NotFound): message = _("There is no share which can host %(volume_size)sG") class InstanceTypeCreateFailed(NovaException): message = _("Unable to create instance type") class InstancePasswordSetFailed(NovaException): message = _("Failed to set admin password on %(instance)s " "because %(reason)s") safe = True class SolidFireAPIException(NovaException): message = _("Bad response from SolidFire API") class SolidFireAPIDataException(SolidFireAPIException): message = _("Error in SolidFire API response: data=%(data)s") class DuplicateVlan(Duplicate): message = _("Detected existing vlan with id %(vlan)d") class InstanceNotFound(NotFound): message = _("Instance %(instance_id)s could not be found.") class MarkerNotFound(NotFound): message = _("Marker %(marker)s could not be found.") class InvalidInstanceIDMalformed(Invalid): message = _("Invalid id: %(val)s (expecting \"i-...\").") class CouldNotFetchImage(NovaException): message = _("Could not fetch image %(image_id)s") class TaskAlreadyRunning(NovaException): message = _("Task %(task_name)s is already running on host %(host)s") class TaskNotRunning(NovaException): message = _("Task %(task_name)s is not running on host %(host)s") class InstanceIsLocked(InstanceInvalidState): message = _("Instance %(instance_uuid)s is locked") class ConfigDriveMountFailed(NovaException): message = _("Could not mount vfat config drive. %(operation)s failed. " "Error: %(error)s") class ConfigDriveUnknownFormat(NovaException): message = _("Unknown config drive format %(format)s. Select one of " "iso9660 or vfat.") class InstanceUserDataTooLarge(NovaException): message = _("User data too large. User data must be no larger than " "%(maxsize)s bytes once base64 encoded. Your data is " "%(length)d bytes") class InstanceUserDataMalformed(NovaException): message = _("User data needs to be valid base 64.") class UnexpectedTaskStateError(NovaException): message = _("unexpected task state: expecting %(expected)s but " "the actual state is %(actual)s") class CryptoCAFileNotFound(FileNotFound): message = _("The CA file for %(project)s could not be found") class CryptoCRLFileNotFound(FileNotFound): message = _("The CRL file for %(project)s could not be found") def get_context_from_function_and_args(function, args, kwargs): """Find an arg of type RequestContext and return it. This is useful in a couple of decorators where we don't know much about the function we're wrapping. """ # import here to avoid circularity: from nova import context for arg in itertools.chain(kwargs.values(), args): if isinstance(arg, context.RequestContext): return arg return None
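# --------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of nova): the %(...)s
# placeholders in the messages above are filled in by the NovaException base
# class defined earlier in this file. The stand-in below assumes that base
# class simply interpolates its keyword arguments into the class-level
# template -- which is also why a malformed placeholder such as
# "%(flavor_id)" without the trailing "s" would break formatting at raise
# time.
class _ExceptionSketch(Exception):
    message = "An unknown exception occurred."

    def __init__(self, **kwargs):
        try:
            body = self.message % kwargs
        except (KeyError, ValueError):
            # never let a bad substitution mask the original error
            body = self.message
        super(_ExceptionSketch, self).__init__(body)


class _VolumeNotFoundSketch(_ExceptionSketch):
    message = "Volume %(volume_id)s could not be found."


print(_VolumeNotFoundSketch(volume_id='vol-0042'))
# -> Volume vol-0042 could not be found.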
./CrossVul/dataset_final_sorted/CWE-264/py/bad_5539_1
crossvul-python_data_bad_3633_2
# Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The security groups extension.""" import urllib from webob import exc import webob from nova import compute from nova import db from nova import exception from nova import flags from nova import log as logging from nova import rpc from nova import utils from nova.api.openstack import common from nova.api.openstack import extensions from nova.api.openstack import wsgi from nova.compute import power_state from xml.dom import minidom LOG = logging.getLogger("nova.api.contrib.security_groups") FLAGS = flags.FLAGS class SecurityGroupController(object): """The Security group API controller for the OpenStack API.""" def __init__(self): self.compute_api = compute.API() super(SecurityGroupController, self).__init__() def _format_security_group_rule(self, context, rule): sg_rule = {} sg_rule['id'] = rule.id sg_rule['parent_group_id'] = rule.parent_group_id sg_rule['ip_protocol'] = rule.protocol sg_rule['from_port'] = rule.from_port sg_rule['to_port'] = rule.to_port sg_rule['group'] = {} sg_rule['ip_range'] = {} if rule.group_id: source_group = db.security_group_get(context, rule.group_id) sg_rule['group'] = {'name': source_group.name, 'tenant_id': source_group.project_id} else: sg_rule['ip_range'] = {'cidr': rule.cidr} return sg_rule def _format_security_group(self, context, group): security_group = {} security_group['id'] = group.id security_group['description'] = group.description security_group['name'] = group.name security_group['tenant_id'] = group.project_id security_group['rules'] = [] for rule in group.rules: security_group['rules'] += [self._format_security_group_rule( context, rule)] return security_group def _get_security_group(self, context, id): try: id = int(id) security_group = db.security_group_get(context, id) except ValueError: msg = _("Security group id should be integer") raise exc.HTTPBadRequest(explanation=msg) except exception.NotFound as exp: raise exc.HTTPNotFound(explanation=unicode(exp)) return security_group def show(self, req, id): """Return data about the given security group.""" context = req.environ['nova.context'] security_group = self._get_security_group(context, id) return {'security_group': self._format_security_group(context, security_group)} def delete(self, req, id): """Delete a security group.""" context = req.environ['nova.context'] security_group = self._get_security_group(context, id) LOG.audit(_("Delete security group %s"), id, context=context) db.security_group_destroy(context, security_group.id) return exc.HTTPAccepted() def index(self, req): """Returns a list of security groups""" context = req.environ['nova.context'] self.compute_api.ensure_default_security_group(context) groups = db.security_group_get_by_project(context, context.project_id) limited_list = common.limited(groups, req) result = [self._format_security_group(context, group) for group in limited_list] return {'security_groups': list(sorted(result, key=lambda k: (k['tenant_id'], k['name'])))} def 
create(self, req, body): """Creates a new security group.""" context = req.environ['nova.context'] if not body: return exc.HTTPUnprocessableEntity() security_group = body.get('security_group', None) if security_group is None: return exc.HTTPUnprocessableEntity() group_name = security_group.get('name', None) group_description = security_group.get('description', None) self._validate_security_group_property(group_name, "name") self._validate_security_group_property(group_description, "description") group_name = group_name.strip() group_description = group_description.strip() LOG.audit(_("Create Security Group %s"), group_name, context=context) self.compute_api.ensure_default_security_group(context) if db.security_group_exists(context, context.project_id, group_name): msg = _('Security group %s already exists') % group_name raise exc.HTTPBadRequest(explanation=msg) group = {'user_id': context.user_id, 'project_id': context.project_id, 'name': group_name, 'description': group_description} group_ref = db.security_group_create(context, group) return {'security_group': self._format_security_group(context, group_ref)} def _validate_security_group_property(self, value, typ): """ typ will be either 'name' or 'description', depending on the caller """ try: val = value.strip() except AttributeError: msg = _("Security group %s is not a string or unicode") % typ raise exc.HTTPBadRequest(explanation=msg) if not val: msg = _("Security group %s cannot be empty.") % typ raise exc.HTTPBadRequest(explanation=msg) if len(val) > 255: msg = _("Security group %s should not be greater " "than 255 characters.") % typ raise exc.HTTPBadRequest(explanation=msg) class SecurityGroupRulesController(SecurityGroupController): def create(self, req, body): context = req.environ['nova.context'] if not body: raise exc.HTTPUnprocessableEntity() if not 'security_group_rule' in body: raise exc.HTTPUnprocessableEntity() self.compute_api.ensure_default_security_group(context) sg_rule = body['security_group_rule'] parent_group_id = sg_rule.get('parent_group_id', None) try: parent_group_id = int(parent_group_id) security_group = db.security_group_get(context, parent_group_id) except ValueError: msg = _("Parent group id is not integer") return exc.HTTPBadRequest(explanation=msg) except exception.NotFound as exp: msg = _("Security group (%s) not found") % parent_group_id return exc.HTTPNotFound(explanation=msg) msg = _("Authorize security group ingress %s") LOG.audit(msg, security_group['name'], context=context) try: values = self._rule_args_to_dict(context, to_port=sg_rule.get('to_port'), from_port=sg_rule.get('from_port'), parent_group_id=sg_rule.get('parent_group_id'), ip_protocol=sg_rule.get('ip_protocol'), cidr=sg_rule.get('cidr'), group_id=sg_rule.get('group_id')) except Exception as exp: raise exc.HTTPBadRequest(explanation=unicode(exp)) if values is None: msg = _("Not enough parameters to build a " "valid rule.") raise exc.HTTPBadRequest(explanation=msg) values['parent_group_id'] = security_group.id if self._security_group_rule_exists(security_group, values): msg = _('This rule already exists in group %s') % parent_group_id raise exc.HTTPBadRequest(explanation=msg) security_group_rule = db.security_group_rule_create(context, values) self.compute_api.trigger_security_group_rules_refresh(context, security_group_id=security_group['id']) return {"security_group_rule": self._format_security_group_rule( context, security_group_rule)} def _security_group_rule_exists(self, security_group, values): """Indicates whether the specified rule 
values are already defined in the given security group.
        """
        for rule in security_group.rules:
            if 'group_id' in values:
                if rule['group_id'] == values['group_id']:
                    return True
            else:
                is_duplicate = True
                for key in ('cidr', 'from_port', 'to_port', 'protocol'):
                    if rule[key] != values[key]:
                        is_duplicate = False
                        break
                if is_duplicate:
                    return True
        return False

    def _rule_args_to_dict(self, context, to_port=None, from_port=None,
                           parent_group_id=None, ip_protocol=None,
                           cidr=None, group_id=None):
        values = {}

        if group_id:
            try:
                parent_group_id = int(parent_group_id)
                group_id = int(group_id)
            except ValueError:
                msg = _("Parent or group id is not integer")
                raise exception.InvalidInput(reason=msg)

            if parent_group_id == group_id:
                msg = _("Parent group id and group id cannot be same")
                raise exception.InvalidInput(reason=msg)

            values['group_id'] = group_id
            # check if group_id exists
            db.security_group_get(context, group_id)
        elif cidr:
            # If this fails, it throws an exception. This is what we want.
            try:
                cidr = urllib.unquote(cidr).decode()
            except Exception:
                raise exception.InvalidCidr(cidr=cidr)

            if not utils.is_valid_cidr(cidr):
                # Raise exception for non-valid address
                raise exception.InvalidCidr(cidr=cidr)

            values['cidr'] = cidr
        else:
            values['cidr'] = '0.0.0.0/0'

        if ip_protocol and from_port and to_port:
            ip_protocol = str(ip_protocol)
            try:
                from_port = int(from_port)
                to_port = int(to_port)
            except ValueError:
                if ip_protocol.upper() == 'ICMP':
                    raise exception.InvalidInput(reason="Type and"
                                                 " Code must be integers for"
                                                 " ICMP protocol type")
                else:
                    raise exception.InvalidInput(reason="To and From ports "
                                                 "must be integers")

            if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']:
                raise exception.InvalidIpProtocol(protocol=ip_protocol)

            # Verify that from_port must always be less than
            # or equal to to_port
            if from_port > to_port:
                raise exception.InvalidPortRange(from_port=from_port,
                                                 to_port=to_port,
                                                 msg="Former value cannot"
                                                 " be greater than the latter")

            # Verify valid TCP, UDP port ranges
            if (ip_protocol.upper() in ['TCP', 'UDP'] and
                (from_port < 1 or to_port > 65535)):
                raise exception.InvalidPortRange(from_port=from_port,
                                                 to_port=to_port,
                                                 msg="Valid TCP ports should"
                                                 " be between 1-65535")

            # Verify ICMP type and code
            if (ip_protocol.upper() == "ICMP" and
                (from_port < -1 or to_port > 255)):
                raise exception.InvalidPortRange(from_port=from_port,
                                                 to_port=to_port,
                                                 msg="For ICMP, the"
                                                 " type:code must be valid")

            values['protocol'] = ip_protocol
            values['from_port'] = from_port
            values['to_port'] = to_port
        else:
            # If cidr based filtering, protocol and ports are mandatory
            if 'cidr' in values:
                return None

        return values

    def delete(self, req, id):
        context = req.environ['nova.context']
        self.compute_api.ensure_default_security_group(context)
        try:
            id = int(id)
            rule = db.security_group_rule_get(context, id)
        except ValueError:
            msg = _("Rule id is not integer")
            return exc.HTTPBadRequest(explanation=msg)
        except exception.NotFound as exp:
            msg = _("Rule (%s) not found") % id
            return exc.HTTPNotFound(explanation=msg)

        group_id = rule.parent_group_id
        self.compute_api.ensure_default_security_group(context)
        security_group = db.security_group_get(context, group_id)

        msg = _("Revoke security group ingress %s")
        LOG.audit(msg, security_group['name'], context=context)

        db.security_group_rule_destroy(context, rule['id'])
        self.compute_api.trigger_security_group_rules_refresh(context,
                security_group_id=security_group['id'])

        return exc.HTTPAccepted()


class Security_groups(extensions.ExtensionDescriptor):
    def __init__(self):
        self.compute_api = compute.API()
super(Security_groups, self).__init__() def get_name(self): return "SecurityGroups" def get_alias(self): return "security_groups" def get_description(self): return "Security group support" def get_namespace(self): return "http://docs.openstack.org/ext/securitygroups/api/v1.1" def get_updated(self): return "2011-07-21T00:00:00+00:00" def _addSecurityGroup(self, input_dict, req, instance_id): context = req.environ['nova.context'] try: body = input_dict['addSecurityGroup'] group_name = body['name'] instance_id = int(instance_id) except ValueError: msg = _("Server id should be integer") raise exc.HTTPBadRequest(explanation=msg) except TypeError: msg = _("Missing parameter dict") raise webob.exc.HTTPBadRequest(explanation=msg) except KeyError: msg = _("Security group not specified") raise webob.exc.HTTPBadRequest(explanation=msg) if not group_name or group_name.strip() == '': msg = _("Security group name cannot be empty") raise webob.exc.HTTPBadRequest(explanation=msg) try: self.compute_api.add_security_group(context, instance_id, group_name) except exception.SecurityGroupNotFound as exp: return exc.HTTPNotFound(explanation=unicode(exp)) except exception.InstanceNotFound as exp: return exc.HTTPNotFound(explanation=unicode(exp)) except exception.Invalid as exp: return exc.HTTPBadRequest(explanation=unicode(exp)) return exc.HTTPAccepted() def _removeSecurityGroup(self, input_dict, req, instance_id): context = req.environ['nova.context'] try: body = input_dict['removeSecurityGroup'] group_name = body['name'] instance_id = int(instance_id) except ValueError: msg = _("Server id should be integer") raise exc.HTTPBadRequest(explanation=msg) except TypeError: msg = _("Missing parameter dict") raise webob.exc.HTTPBadRequest(explanation=msg) except KeyError: msg = _("Security group not specified") raise webob.exc.HTTPBadRequest(explanation=msg) if not group_name or group_name.strip() == '': msg = _("Security group name cannot be empty") raise webob.exc.HTTPBadRequest(explanation=msg) try: self.compute_api.remove_security_group(context, instance_id, group_name) except exception.SecurityGroupNotFound as exp: return exc.HTTPNotFound(explanation=unicode(exp)) except exception.InstanceNotFound as exp: return exc.HTTPNotFound(explanation=unicode(exp)) except exception.Invalid as exp: return exc.HTTPBadRequest(explanation=unicode(exp)) return exc.HTTPAccepted() def get_actions(self): """Return the actions the extensions adds""" actions = [ extensions.ActionExtension("servers", "addSecurityGroup", self._addSecurityGroup), extensions.ActionExtension("servers", "removeSecurityGroup", self._removeSecurityGroup) ] return actions def get_resources(self): resources = [] metadata = _get_metadata() body_serializers = { 'application/xml': wsgi.XMLDictSerializer(metadata=metadata, xmlns=wsgi.XMLNS_V11), } serializer = wsgi.ResponseSerializer(body_serializers, None) body_deserializers = { 'application/xml': SecurityGroupXMLDeserializer(), } deserializer = wsgi.RequestDeserializer(body_deserializers) res = extensions.ResourceExtension('os-security-groups', controller=SecurityGroupController(), deserializer=deserializer, serializer=serializer) resources.append(res) body_deserializers = { 'application/xml': SecurityGroupRulesXMLDeserializer(), } deserializer = wsgi.RequestDeserializer(body_deserializers) res = extensions.ResourceExtension('os-security-group-rules', controller=SecurityGroupRulesController(), deserializer=deserializer, serializer=serializer) resources.append(res) return resources class 
SecurityGroupXMLDeserializer(wsgi.MetadataXMLDeserializer): """ Deserializer to handle xml-formatted security group requests. """ def create(self, string): """Deserialize an xml-formatted security group create request""" dom = minidom.parseString(string) security_group = {} sg_node = self.find_first_child_named(dom, 'security_group') if sg_node is not None: if sg_node.hasAttribute('name'): security_group['name'] = sg_node.getAttribute('name') desc_node = self.find_first_child_named(sg_node, "description") if desc_node: security_group['description'] = self.extract_text(desc_node) return {'body': {'security_group': security_group}} class SecurityGroupRulesXMLDeserializer(wsgi.MetadataXMLDeserializer): """ Deserializer to handle xml-formatted security group requests. """ def create(self, string): """Deserialize an xml-formatted security group create request""" dom = minidom.parseString(string) security_group_rule = self._extract_security_group_rule(dom) return {'body': {'security_group_rule': security_group_rule}} def _extract_security_group_rule(self, node): """Marshal the security group rule attribute of a parsed request""" sg_rule = {} sg_rule_node = self.find_first_child_named(node, 'security_group_rule') if sg_rule_node is not None: ip_protocol_node = self.find_first_child_named(sg_rule_node, "ip_protocol") if ip_protocol_node is not None: sg_rule['ip_protocol'] = self.extract_text(ip_protocol_node) from_port_node = self.find_first_child_named(sg_rule_node, "from_port") if from_port_node is not None: sg_rule['from_port'] = self.extract_text(from_port_node) to_port_node = self.find_first_child_named(sg_rule_node, "to_port") if to_port_node is not None: sg_rule['to_port'] = self.extract_text(to_port_node) parent_group_id_node = self.find_first_child_named(sg_rule_node, "parent_group_id") if parent_group_id_node is not None: sg_rule['parent_group_id'] = self.extract_text( parent_group_id_node) group_id_node = self.find_first_child_named(sg_rule_node, "group_id") if group_id_node is not None: sg_rule['group_id'] = self.extract_text(group_id_node) cidr_node = self.find_first_child_named(sg_rule_node, "cidr") if cidr_node is not None: sg_rule['cidr'] = self.extract_text(cidr_node) return sg_rule def _get_metadata(): metadata = { "attributes": { "security_group": ["id", "tenant_id", "name"], "rule": ["id", "parent_group_id"], "security_group_rule": ["id", "parent_group_id"], } } return metadata
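# --------------------------------------------------------------------------
# Usage sketch (illustrative only; the helper name and ValueError signalling
# are assumptions, not nova API): the port/protocol range checks performed
# by _rule_args_to_dict above, distilled into a standalone function.
def _validate_rule_ports(ip_protocol, from_port, to_port):
    ip_protocol = str(ip_protocol).upper()
    from_port, to_port = int(from_port), int(to_port)
    if ip_protocol not in ('TCP', 'UDP', 'ICMP'):
        raise ValueError('unsupported protocol: %s' % ip_protocol)
    # from_port may never exceed to_port
    if from_port > to_port:
        raise ValueError('from_port is greater than to_port')
    # TCP/UDP ports live in 1-65535; ICMP uses type:code within -1..255
    if ip_protocol in ('TCP', 'UDP') and (from_port < 1 or to_port > 65535):
        raise ValueError('TCP/UDP ports must be between 1 and 65535')
    if ip_protocol == 'ICMP' and (from_port < -1 or to_port > 255):
        raise ValueError('invalid ICMP type:code')
    return {'protocol': ip_protocol, 'from_port': from_port,
            'to_port': to_port}

print(_validate_rule_ports('tcp', 22, 22))
# prints the normalized rule values; _validate_rule_ports('tcp', 0, 80)
# would raise ValueError instead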
./CrossVul/dataset_final_sorted/CWE-264/py/bad_3633_2
crossvul-python_data_bad_3698_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2011 OpenStack, LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.rootwrap import filters filterlist = [ # nova/virt/disk/mount.py: 'kpartx', '-a', device # nova/virt/disk/mount.py: 'kpartx', '-d', device filters.CommandFilter("/sbin/kpartx", "root"), # nova/virt/disk/mount.py: 'tune2fs', '-c', 0, '-i', 0, mapped_device # nova/virt/xenapi/vm_utils.py: "tune2fs", "-O ^has_journal", part_path # nova/virt/xenapi/vm_utils.py: "tune2fs", "-j", partition_path filters.CommandFilter("/sbin/tune2fs", "root"), # nova/virt/disk/mount.py: 'mount', mapped_device, mount_dir # nova/virt/xenapi/vm_utils.py: 'mount', '-t', 'ext2,ext3,ext4,reiserfs'.. filters.CommandFilter("/bin/mount", "root"), # nova/virt/disk/mount.py: 'umount', mapped_device # nova/virt/xenapi/vm_utils.py: 'umount', dev_path filters.CommandFilter("/bin/umount", "root"), # nova/virt/disk/nbd.py: 'qemu-nbd', '-c', device, image # nova/virt/disk/nbd.py: 'qemu-nbd', '-d', device filters.CommandFilter("/usr/bin/qemu-nbd", "root"), # nova/virt/disk/loop.py: 'losetup', '--find', '--show', image # nova/virt/disk/loop.py: 'losetup', '--detach', device filters.CommandFilter("/sbin/losetup", "root"), # nova/virt/disk/guestfs.py: 'guestmount', '--rw', '-a', image, '-i' # nova/virt/disk/guestfs.py: 'guestmount', '--rw', '-a', image, '-m' dev filters.CommandFilter("/usr/bin/guestmount", "root"), # nova/virt/disk/guestfs.py: 'fusermount', 'u', mount_dir filters.CommandFilter("/bin/fusermount", "root"), filters.CommandFilter("/usr/bin/fusermount", "root"), # nova/virt/disk/api.py: 'tee', metadata_path # nova/virt/disk/api.py: 'tee', '-a', keyfile # nova/virt/disk/api.py: 'tee', netfile filters.CommandFilter("/usr/bin/tee", "root"), # nova/virt/disk/api.py: 'mkdir', '-p', sshdir # nova/virt/disk/api.py: 'mkdir', '-p', netdir filters.CommandFilter("/bin/mkdir", "root"), # nova/virt/disk/api.py: 'chown', 'root', sshdir # nova/virt/disk/api.py: 'chown', 'root:root', netdir # nova/virt/libvirt/connection.py: 'chown', os.getuid(), console_log # nova/virt/libvirt/connection.py: 'chown', os.getuid(), console_log # nova/virt/libvirt/connection.py: 'chown', 'root', basepath('disk') # nova/utils.py: 'chown', owner_uid, path filters.CommandFilter("/bin/chown", "root"), # nova/virt/disk/api.py: 'chmod', '700', sshdir # nova/virt/disk/api.py: 'chmod', 755, netdir filters.CommandFilter("/bin/chmod", "root"), # nova/virt/disk/api.py: 'cp', os.path.join(fs... filters.CommandFilter("/bin/cp", "root"), # nova/virt/libvirt/vif.py: 'ip', 'tuntap', 'add', dev, 'mode', 'tap' # nova/virt/libvirt/vif.py: 'ip', 'link', 'set', dev, 'up' # nova/virt/libvirt/vif.py: 'ip', 'link', 'delete', dev # nova/network/linux_net.py: 'ip', 'addr', 'add', str(floating_ip)+'/32'i.. # nova/network/linux_net.py: 'ip', 'addr', 'del', str(floating_ip)+'/32'.. # nova/network/linux_net.py: 'ip', 'addr', 'add', '169.254.169.254/32',.. # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', dev, 'scope',.. 
# nova/network/linux_net.py: 'ip', 'addr', 'del/add', ip_params, dev) # nova/network/linux_net.py: 'ip', 'addr', 'del', params, fields[-1] # nova/network/linux_net.py: 'ip', 'addr', 'add', params, bridge # nova/network/linux_net.py: 'ip', '-f', 'inet6', 'addr', 'change', .. # nova/network/linux_net.py: 'ip', 'link', 'set', 'dev', dev, 'promisc',.. # nova/network/linux_net.py: 'ip', 'link', 'add', 'link', bridge_if ... # nova/network/linux_net.py: 'ip', 'link', 'set', interface, "address",.. # nova/network/linux_net.py: 'ip', 'link', 'set', interface, 'up' # nova/network/linux_net.py: 'ip', 'link', 'set', bridge, 'up' # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', interface, .. # nova/network/linux_net.py: 'ip', 'link', 'set', dev, "address", .. # nova/network/linux_net.py: 'ip', 'link', 'set', dev, 'up' filters.CommandFilter("/sbin/ip", "root"), # nova/virt/libvirt/vif.py: 'tunctl', '-b', '-t', dev # nova/network/linux_net.py: 'tunctl', '-b', '-t', dev filters.CommandFilter("/usr/sbin/tunctl", "root"), filters.CommandFilter("/bin/tunctl", "root"), # nova/virt/libvirt/vif.py: 'ovs-vsctl', ... # nova/virt/libvirt/vif.py: 'ovs-vsctl', 'del-port', ... # nova/network/linux_net.py: 'ovs-vsctl', .... filters.CommandFilter("/usr/bin/ovs-vsctl", "root"), # nova/network/linux_net.py: 'ovs-ofctl', .... filters.CommandFilter("/usr/bin/ovs-ofctl", "root"), # nova/virt/libvirt/connection.py: 'dd', "if=%s" % virsh_output, ... filters.CommandFilter("/bin/dd", "root"), # nova/virt/xenapi/volume_utils.py: 'iscsiadm', '-m', ... filters.CommandFilter("/sbin/iscsiadm", "root"), # nova/virt/xenapi/vm_utils.py: "parted", "--script", ... # nova/virt/xenapi/vm_utils.py: 'parted', '--script', dev_path, ..*. filters.CommandFilter("/sbin/parted", "root"), filters.CommandFilter("/usr/sbin/parted", "root"), # nova/virt/xenapi/vm_utils.py: fdisk %(dev_path)s filters.CommandFilter("/sbin/fdisk", "root"), # nova/virt/xenapi/vm_utils.py: "e2fsck", "-f", "-p", partition_path filters.CommandFilter("/sbin/e2fsck", "root"), # nova/virt/xenapi/vm_utils.py: "resize2fs", partition_path filters.CommandFilter("/sbin/resize2fs", "root"), # nova/network/linux_net.py: 'ip[6]tables-save' % (cmd,), '-t', ... filters.CommandFilter("/sbin/iptables-save", "root"), filters.CommandFilter("/usr/sbin/iptables-save", "root"), filters.CommandFilter("/sbin/ip6tables-save", "root"), filters.CommandFilter("/usr/sbin/ip6tables-save", "root"), # nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,) filters.CommandFilter("/sbin/iptables-restore", "root"), filters.CommandFilter("/usr/sbin/iptables-restore", "root"), filters.CommandFilter("/sbin/ip6tables-restore", "root"), filters.CommandFilter("/usr/sbin/ip6tables-restore", "root"), # nova/network/linux_net.py: 'arping', '-U', floating_ip, '-A', '-I', ... # nova/network/linux_net.py: 'arping', '-U', network_ref['dhcp_server'],.. filters.CommandFilter("/usr/bin/arping", "root"), filters.CommandFilter("/sbin/arping", "root"), # nova/network/linux_net.py: 'route', '-n' # nova/network/linux_net.py: 'route', 'del', 'default', 'gw' # nova/network/linux_net.py: 'route', 'add', 'default', 'gw' # nova/network/linux_net.py: 'route', '-n' # nova/network/linux_net.py: 'route', 'del', 'default', 'gw', old_gw, .. 
# nova/network/linux_net.py: 'route', 'add', 'default', 'gw', old_gateway filters.CommandFilter("/sbin/route", "root"), # nova/network/linux_net.py: 'dhcp_release', dev, address, mac_address filters.CommandFilter("/usr/bin/dhcp_release", "root"), # nova/network/linux_net.py: 'kill', '-9', pid # nova/network/linux_net.py: 'kill', '-HUP', pid filters.KillFilter("/bin/kill", "root", ['-9', '-HUP'], ['/usr/sbin/dnsmasq']), # nova/network/linux_net.py: 'kill', pid filters.KillFilter("/bin/kill", "root", [''], ['/usr/sbin/radvd']), # nova/network/linux_net.py: dnsmasq call filters.DnsmasqFilter("/usr/sbin/dnsmasq", "root"), # nova/network/linux_net.py: 'radvd', '-C', '%s' % _ra_file(dev, 'conf'),.. filters.CommandFilter("/usr/sbin/radvd", "root"), # nova/network/linux_net.py: 'brctl', 'addbr', bridge # nova/network/linux_net.py: 'brctl', 'setfd', bridge, 0 # nova/network/linux_net.py: 'brctl', 'stp', bridge, 'off' # nova/network/linux_net.py: 'brctl', 'addif', bridge, interface filters.CommandFilter("/sbin/brctl", "root"), filters.CommandFilter("/usr/sbin/brctl", "root"), # nova/virt/libvirt/utils.py: 'mkswap' # nova/virt/xenapi/vm_utils.py: 'mkswap' filters.CommandFilter("/sbin/mkswap", "root"), # nova/virt/xenapi/vm_utils.py: 'mkfs' filters.CommandFilter("/sbin/mkfs", "root"), # nova/virt/libvirt/utils.py: 'qemu-img' filters.CommandFilter("/usr/bin/qemu-img", "root"), # nova/virt/disk/api.py: 'touch', target filters.CommandFilter("/usr/bin/touch", "root"), # nova/virt/libvirt/connection.py: filters.ReadFileFilter("/etc/iscsi/initiatorname.iscsi"), ]
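# --------------------------------------------------------------------------
# Usage sketch (illustrative; this is not nova's actual rootwrap matcher and
# the function name is made up): one plausible way a wrapper can consult a
# whitelist like filterlist is to match on the executable's basename and
# then re-issue the command through the absolute, whitelisted path.
import os


def _match_command(filter_paths, userargs):
    for exec_path in filter_paths:
        if os.path.basename(exec_path) == os.path.basename(userargs[0]):
            return [exec_path] + list(userargs[1:])
    return None  # command is not whitelisted

print(_match_command(['/sbin/ip', '/bin/mount'],
                     ['ip', 'link', 'set', 'dev0', 'up']))
# -> ['/sbin/ip', 'link', 'set', 'dev0', 'up']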
./CrossVul/dataset_final_sorted/CWE-264/py/bad_3698_0
crossvul-python_data_good_3693_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Main entry point into the Identity service.""" import uuid import urllib import urlparse from keystone import config from keystone import exception from keystone import policy from keystone import token from keystone.common import logging from keystone.common import manager from keystone.common import wsgi CONF = config.CONF LOG = logging.getLogger(__name__) class Manager(manager.Manager): """Default pivot point for the Identity backend. See :mod:`keystone.common.manager.Manager` for more details on how this dynamically calls the backend. """ def __init__(self): super(Manager, self).__init__(CONF.identity.driver) class Driver(object): """Interface description for an Identity driver.""" def authenticate(self, user_id=None, tenant_id=None, password=None): """Authenticate a given user, tenant and password. Returns: (user, tenant, metadata). """ raise exception.NotImplemented() def get_tenant(self, tenant_id): """Get a tenant by id. Returns: tenant_ref or None. """ raise exception.NotImplemented() def get_tenant_by_name(self, tenant_name): """Get a tenant by name. Returns: tenant_ref or None. """ raise exception.NotImplemented() def get_user(self, user_id): """Get a user by id. Returns: user_ref or None. """ raise exception.NotImplemented() def get_user_by_name(self, user_name): """Get a user by name. Returns: user_ref or None. """ raise exception.NotImplemented() def get_role(self, role_id): """Get a role by id. Returns: role_ref or None. """ raise exception.NotImplemented() def list_users(self): """List all users in the system. NOTE(termie): I'd prefer if this listed only the users for a given tenant. Returns: a list of user_refs or an empty list. """ raise exception.NotImplemented() def list_roles(self): """List all roles in the system. Returns: a list of role_refs or an empty list. """ raise exception.NotImplemented() # NOTE(termie): seven calls below should probably be exposed by the api # more clearly when the api redesign happens def add_user_to_tenant(self, tenant_id, user_id): raise exception.NotImplemented() def remove_user_from_tenant(self, tenant_id, user_id): raise exception.NotImplemented() def get_all_tenants(self): raise exception.NotImplemented() def get_tenants_for_user(self, user_id): """Get the tenants associated with a given user. Returns: a list of tenant ids. """ raise exception.NotImplemented() def get_roles_for_user_and_tenant(self, user_id, tenant_id): """Get the roles associated with a user within given tenant. Returns: a list of role ids. 
""" raise exception.NotImplemented() def add_role_to_user_and_tenant(self, user_id, tenant_id, role_id): """Add a role to a user within given tenant.""" raise exception.NotImplemented() def remove_role_from_user_and_tenant(self, user_id, tenant_id, role_id): """Remove a role from a user within given tenant.""" raise exception.NotImplemented() # user crud def create_user(self, user_id, user): raise exception.NotImplemented() def update_user(self, user_id, user): raise exception.NotImplemented() def delete_user(self, user_id): raise exception.NotImplemented() # tenant crud def create_tenant(self, tenant_id, tenant): raise exception.NotImplemented() def update_tenant(self, tenant_id, tenant): raise exception.NotImplemented() def delete_tenant(self, tenant_id, tenant): raise exception.NotImplemented() # metadata crud def get_metadata(self, user_id, tenant_id): raise exception.NotImplemented() def create_metadata(self, user_id, tenant_id, metadata): raise exception.NotImplemented() def update_metadata(self, user_id, tenant_id, metadata): raise exception.NotImplemented() def delete_metadata(self, user_id, tenant_id, metadata): raise exception.NotImplemented() # role crud def create_role(self, role_id, role): raise exception.NotImplemented() def update_role(self, role_id, role): raise exception.NotImplemented() def delete_role(self, role_id): raise exception.NotImplemented() class PublicRouter(wsgi.ComposableRouter): def add_routes(self, mapper): tenant_controller = TenantController() mapper.connect('/tenants', controller=tenant_controller, action='get_tenants_for_token', conditions=dict(methods=['GET'])) class AdminRouter(wsgi.ComposableRouter): def add_routes(self, mapper): # Tenant Operations tenant_controller = TenantController() mapper.connect('/tenants', controller=tenant_controller, action='get_all_tenants', conditions=dict(method=['GET'])) mapper.connect('/tenants/{tenant_id}', controller=tenant_controller, action='get_tenant', conditions=dict(method=['GET'])) # User Operations user_controller = UserController() mapper.connect('/users/{user_id}', controller=user_controller, action='get_user', conditions=dict(method=['GET'])) # Role Operations roles_controller = RoleController() mapper.connect('/tenants/{tenant_id}/users/{user_id}/roles', controller=roles_controller, action='get_user_roles', conditions=dict(method=['GET'])) mapper.connect('/users/{user_id}/roles', controller=user_controller, action='get_user_roles', conditions=dict(method=['GET'])) class TenantController(wsgi.Application): def __init__(self): self.identity_api = Manager() self.policy_api = policy.Manager() self.token_api = token.Manager() super(TenantController, self).__init__() def get_all_tenants(self, context, **kw): """Gets a list of all tenants for an admin user.""" self.assert_admin(context) tenant_refs = self.identity_api.get_tenants(context) params = { 'limit': context['query_string'].get('limit'), 'marker': context['query_string'].get('marker'), } return self._format_tenant_list(tenant_refs, **params) def get_tenants_for_token(self, context, **kw): """Get valid tenants for token based on token used to authenticate. Pulls the token from the context, validates it and gets the valid tenants for the user in the token. Doesn't care about token scopedness. 
""" try: token_ref = self.token_api.get_token(context=context, token_id=context['token_id']) except exception.NotFound: raise exception.Unauthorized() user_ref = token_ref['user'] tenant_ids = self.identity_api.get_tenants_for_user( context, user_ref['id']) tenant_refs = [] for tenant_id in tenant_ids: tenant_refs.append(self.identity_api.get_tenant( context=context, tenant_id=tenant_id)) params = { 'limit': context['query_string'].get('limit'), 'marker': context['query_string'].get('marker'), } return self._format_tenant_list(tenant_refs, **params) def get_tenant(self, context, tenant_id): # TODO(termie): this stuff should probably be moved to middleware self.assert_admin(context) tenant = self.identity_api.get_tenant(context, tenant_id) if tenant is None: raise exception.TenantNotFound(tenant_id=tenant_id) return {'tenant': tenant} # CRUD Extension def create_tenant(self, context, tenant): tenant_ref = self._normalize_dict(tenant) self.assert_admin(context) tenant_id = (tenant_ref.get('id') and tenant_ref.get('id') or uuid.uuid4().hex) tenant_ref['id'] = tenant_id tenant = self.identity_api.create_tenant( context, tenant_id, tenant_ref) return {'tenant': tenant} def update_tenant(self, context, tenant_id, tenant): self.assert_admin(context) if self.identity_api.get_tenant(context, tenant_id) is None: raise exception.TenantNotFound(tenant_id=tenant_id) tenant_ref = self.identity_api.update_tenant( context, tenant_id, tenant) return {'tenant': tenant_ref} def delete_tenant(self, context, tenant_id, **kw): self.assert_admin(context) if self.identity_api.get_tenant(context, tenant_id) is None: raise exception.TenantNotFound(tenant_id=tenant_id) self.identity_api.delete_tenant(context, tenant_id) def get_tenant_users(self, context, tenant_id, **kw): self.assert_admin(context) if self.identity_api.get_tenant(context, tenant_id) is None: raise exception.TenantNotFound(tenant_id=tenant_id) user_refs = self.identity_api.get_tenant_users(context, tenant_id) return {'users': user_refs} def _format_tenant_list(self, tenant_refs, **kwargs): marker = kwargs.get('marker') page_idx = 0 if marker is not None: for (marker_idx, tenant) in enumerate(tenant_refs): if tenant['id'] == marker: # we start pagination after the marker page_idx = marker_idx + 1 break else: msg = 'Marker could not be found' raise exception.ValidationError(message=msg) limit = kwargs.get('limit') if limit is not None: try: limit = int(limit) if limit < 0: raise AssertionError() except (ValueError, AssertionError): msg = 'Invalid limit value' raise exception.ValidationError(message=msg) tenant_refs = tenant_refs[page_idx:limit] for x in tenant_refs: if 'enabled' not in x: x['enabled'] = True o = {'tenants': tenant_refs, 'tenants_links': []} return o class UserController(wsgi.Application): def __init__(self): self.identity_api = Manager() self.policy_api = policy.Manager() self.token_api = token.Manager() super(UserController, self).__init__() def get_user(self, context, user_id): self.assert_admin(context) user_ref = self.identity_api.get_user(context, user_id) if not user_ref: raise exception.UserNotFound(user_id=user_id) return {'user': user_ref} def get_users(self, context): # NOTE(termie): i can't imagine that this really wants all the data # about every single user in the system... 
self.assert_admin(context) user_refs = self.identity_api.list_users(context) return {'users': user_refs} # CRUD extension def create_user(self, context, user): user = self._normalize_dict(user) self.assert_admin(context) tenant_id = user.get('tenantId', None) if (tenant_id is not None and self.identity_api.get_tenant(context, tenant_id) is None): raise exception.TenantNotFound(tenant_id=tenant_id) user_id = uuid.uuid4().hex user_ref = user.copy() user_ref['id'] = user_id new_user_ref = self.identity_api.create_user( context, user_id, user_ref) if tenant_id: self.identity_api.add_user_to_tenant(context, tenant_id, user_id) return {'user': new_user_ref} def update_user(self, context, user_id, user): # NOTE(termie): this is really more of a patch than a put self.assert_admin(context) if self.identity_api.get_user(context, user_id) is None: raise exception.UserNotFound(user_id=user_id) user_ref = self.identity_api.update_user(context, user_id, user) return {'user': user_ref} def delete_user(self, context, user_id): self.assert_admin(context) if self.identity_api.get_user(context, user_id) is None: raise exception.UserNotFound(user_id=user_id) self.identity_api.delete_user(context, user_id) def set_user_enabled(self, context, user_id, user): return self.update_user(context, user_id, user) def set_user_password(self, context, user_id, user): user_ref = self.update_user(context, user_id, user) try: for token_id in self.token_api.list_tokens(context, user_id): self.token_api.delete_token(context, token_id) except exception.NotImplemented: # The password has been changed but tokens remain valid for # backends that can't list tokens for users LOG.warning('Password changed for %s, but existing tokens remain ' 'valid' % user_id) return user_ref def update_user_tenant(self, context, user_id, user): """Update the default tenant.""" # ensure that we're a member of that tenant tenant_id = user.get('tenantId') self.identity_api.add_user_to_tenant(context, tenant_id, user_id) return self.update_user(context, user_id, user) class RoleController(wsgi.Application): def __init__(self): self.identity_api = Manager() self.token_api = token.Manager() self.policy_api = policy.Manager() super(RoleController, self).__init__() # COMPAT(essex-3) def get_user_roles(self, context, user_id, tenant_id=None): """Get the roles for a user and tenant pair. Since we're trying to ignore the idea of user-only roles we're not implementing them in hopes that the idea will die off. 
""" if tenant_id is None: raise exception.NotImplemented(message='User roles not supported: ' 'tenant ID required') user = self.identity_api.get_user(context, user_id) if user is None: raise exception.UserNotFound(user_id=user_id) tenant = self.identity_api.get_tenant(context, tenant_id) if tenant is None: raise exception.TenantNotFound(tenant_id=tenant_id) roles = self.identity_api.get_roles_for_user_and_tenant( context, user_id, tenant_id) return {'roles': [self.identity_api.get_role(context, x) for x in roles]} # CRUD extension def get_role(self, context, role_id): self.assert_admin(context) role_ref = self.identity_api.get_role(context, role_id) if not role_ref: raise exception.RoleNotFound(role_id=role_id) return {'role': role_ref} def create_role(self, context, role): role = self._normalize_dict(role) self.assert_admin(context) role_id = uuid.uuid4().hex role['id'] = role_id role_ref = self.identity_api.create_role(context, role_id, role) return {'role': role_ref} def delete_role(self, context, role_id): self.assert_admin(context) self.get_role(context, role_id) self.identity_api.delete_role(context, role_id) def get_roles(self, context): self.assert_admin(context) roles = self.identity_api.list_roles(context) # TODO(termie): probably inefficient at some point return {'roles': roles} def add_role_to_user(self, context, user_id, role_id, tenant_id=None): """Add a role to a user and tenant pair. Since we're trying to ignore the idea of user-only roles we're not implementing them in hopes that the idea will die off. """ self.assert_admin(context) if tenant_id is None: raise exception.NotImplemented(message='User roles not supported: ' 'tenant_id required') if self.identity_api.get_user(context, user_id) is None: raise exception.UserNotFound(user_id=user_id) if self.identity_api.get_tenant(context, tenant_id) is None: raise exception.TenantNotFound(tenant_id=tenant_id) if self.identity_api.get_role(context, role_id) is None: raise exception.RoleNotFound(role_id=role_id) # This still has the weird legacy semantics that adding a role to # a user also adds them to a tenant self.identity_api.add_user_to_tenant(context, tenant_id, user_id) self.identity_api.add_role_to_user_and_tenant( context, user_id, tenant_id, role_id) role_ref = self.identity_api.get_role(context, role_id) return {'role': role_ref} def remove_role_from_user(self, context, user_id, role_id, tenant_id=None): """Remove a role from a user and tenant pair. Since we're trying to ignore the idea of user-only roles we're not implementing them in hopes that the idea will die off. 
""" self.assert_admin(context) if tenant_id is None: raise exception.NotImplemented(message='User roles not supported: ' 'tenant_id required') if self.identity_api.get_user(context, user_id) is None: raise exception.UserNotFound(user_id=user_id) if self.identity_api.get_tenant(context, tenant_id) is None: raise exception.TenantNotFound(tenant_id=tenant_id) if self.identity_api.get_role(context, role_id) is None: raise exception.RoleNotFound(role_id=role_id) # This still has the weird legacy semantics that adding a role to # a user also adds them to a tenant, so we must follow up on that self.identity_api.remove_role_from_user_and_tenant( context, user_id, tenant_id, role_id) roles = self.identity_api.get_roles_for_user_and_tenant( context, user_id, tenant_id) if not roles: self.identity_api.remove_user_from_tenant( context, tenant_id, user_id) return # COMPAT(diablo): CRUD extension def get_role_refs(self, context, user_id): """Ultimate hack to get around having to make role_refs first-class. This will basically iterate over the various roles the user has in all tenants the user is a member of and create fake role_refs where the id encodes the user-tenant-role information so we can look up the appropriate data when we need to delete them. """ self.assert_admin(context) user_ref = self.identity_api.get_user(context, user_id) tenant_ids = self.identity_api.get_tenants_for_user(context, user_id) o = [] for tenant_id in tenant_ids: role_ids = self.identity_api.get_roles_for_user_and_tenant( context, user_id, tenant_id) for role_id in role_ids: ref = {'roleId': role_id, 'tenantId': tenant_id, 'userId': user_id} ref['id'] = urllib.urlencode(ref) o.append(ref) return {'roles': o} # COMPAT(diablo): CRUD extension def create_role_ref(self, context, user_id, role): """This is actually used for adding a user to a tenant. In the legacy data model adding a user to a tenant required setting a role. """ self.assert_admin(context) # TODO(termie): for now we're ignoring the actual role tenant_id = role.get('tenantId') role_id = role.get('roleId') self.identity_api.add_user_to_tenant(context, tenant_id, user_id) self.identity_api.add_role_to_user_and_tenant( context, user_id, tenant_id, role_id) role_ref = self.identity_api.get_role(context, role_id) return {'role': role_ref} # COMPAT(diablo): CRUD extension def delete_role_ref(self, context, user_id, role_ref_id): """This is actually used for deleting a user from a tenant. In the legacy data model removing a user from a tenant required deleting a role. To emulate this, we encode the tenant and role in the role_ref_id, and if this happens to be the last role for the user-tenant pair, we remove the user from the tenant. """ self.assert_admin(context) # TODO(termie): for now we're ignoring the actual role role_ref_ref = urlparse.parse_qs(role_ref_id) tenant_id = role_ref_ref.get('tenantId')[0] role_id = role_ref_ref.get('roleId')[0] self.identity_api.remove_role_from_user_and_tenant( context, user_id, tenant_id, role_id) roles = self.identity_api.get_roles_for_user_and_tenant( context, user_id, tenant_id) if not roles: self.identity_api.remove_user_from_tenant( context, tenant_id, user_id)
./CrossVul/dataset_final_sorted/CWE-264/py/good_3693_0
crossvul-python_data_good_5235_0
#
# Limited command Shell (lshell)
#
# Copyright (C) 2008-2013 Ignace Mouzannar (ghantoos) <ghantoos@ghantoos.org>
#
# This file is part of lshell
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import sys
import re
import os

# import lshell specifics
from lshell import utils


def warn_count(messagetype, command, conf, strict=None, ssh=None):
    """ Update the warning_counter, log and display a warning to the user
    """
    log = conf['logpath']

    if not ssh:
        if strict:
            conf['warning_counter'] -= 1
            if conf['warning_counter'] < 0:
                log.critical('*** forbidden %s -> "%s"'
                             % (messagetype, command))
                log.critical('*** Kicked out')
                sys.exit(1)
            else:
                log.critical('*** forbidden %s -> "%s"'
                             % (messagetype, command))
                sys.stderr.write('*** You have %s warning(s) left,'
                                 ' before getting kicked out.\n'
                                 % conf['warning_counter'])
                log.error('*** User warned, counter: %s'
                          % conf['warning_counter'])
                sys.stderr.write('This incident has been reported.\n')
        else:
            if not conf['quiet']:
                log.critical('*** forbidden %s: %s' % (messagetype, command))

    # if you are here, it means that you did something wrong. Return 1.
    return 1, conf


def check_path(line, conf, completion=None, ssh=None, strict=None):
    """ Check if a path is entered in the line. If so, it checks whether the
        user is allowed to see this path. If the user is not allowed, it
        calls warn_count. In case of completion, it only returns 0 or 1.
    """
    allowed_path_re = str(conf['path'][0])
    denied_path_re = str(conf['path'][1][:-1])

    # split line depending on the operators
    sep = re.compile(r'\ |;|\||&')
    line = line.strip()
    line = sep.split(line)

    for item in line:
        # remove potential quotes or back-ticks
        item = re.sub(r'^["\'`]|["\'`]$', '', item)

        # remove potential $(), ${}, ``
        item = re.sub(r'^\$[\(\{]|[\)\}]$', '', item)

        # if item has been converted to something other than a string,
        # reconvert it to a string before the path manipulations below
        if not isinstance(item, str):
            item = str(item)

        # replace "~" with home path
        item = os.path.expanduser(item)

        # expand shell wildcards using "echo"
        # I know, this is a bit nasty...
if re.findall('\$|\*|\?', item): # remove quotes if available item = re.sub("\"|\'", "", item) import subprocess p = subprocess.Popen("`which echo` %s" % item, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) cout = p.stdout try: item = cout.readlines()[0].decode('utf8').split(' ')[0] item = item.strip() item = os.path.expandvars(item) except IndexError: conf['logpath'].critical('*** Internal error: command not ' 'executed') return 1, conf tomatch = os.path.realpath(item) if os.path.isdir(tomatch) and tomatch[-1] != '/': tomatch += '/' match_allowed = re.findall(allowed_path_re, tomatch) if denied_path_re: match_denied = re.findall(denied_path_re, tomatch) else: match_denied = None # if path not allowed # case path executed: warn, and return 1 # case completion: return 1 if not match_allowed or match_denied: if not completion: ret, conf = warn_count('path', tomatch, conf, strict=strict, ssh=ssh) return 1, conf if not completion: if not re.findall(allowed_path_re, os.getcwd() + '/'): ret, conf = warn_count('path', tomatch, conf, strict=strict, ssh=ssh) os.chdir(conf['home_path']) conf['promptprint'] = utils.updateprompt(os.getcwd(), conf) return 1, conf return 0, conf def check_secure(line, conf, strict=None, ssh=None): """This method is used to check the content on the typed command. Its purpose is to forbid the user to user to override the lshell command restrictions. The forbidden characters are placed in the 'forbidden' variable. Feel free to update the list. Emptying it would be quite useless..: ) A warning counter has been added, to kick out of lshell a user if he is warned more than X time (X being the 'warning_counter' variable). """ # store original string oline = line # strip all spaces/tabs line = line.strip() # init return code returncode = 0 # This logic is kept crudely simple on purpose. # At most we might match the same stanza twice # (for e.g. 
"'a'", 'a') but the converse would # require detecting single quotation stanzas # nested within double quotes and vice versa relist = re.findall(r'[^=]\"(.+)\"', line) relist2 = re.findall(r'[^=]\'(.+)\'', line) relist = relist + relist2 for item in relist: if os.path.exists(item): ret_check_path, conf = check_path(item, conf, strict=strict) returncode += ret_check_path # parse command line for control characters, and warn user if re.findall(r'[\x01-\x1F\x7F]', oline): ret, conf = warn_count('control char', oline, conf, strict=strict, ssh=ssh) return ret, conf for item in conf['forbidden']: # allow '&&' and '||' even if singles are forbidden if item in ['&', '|']: if re.findall("[^\%s]\%s[^\%s]" % (item, item, item), line): ret, conf = warn_count('syntax', oline, conf, strict=strict, ssh=ssh) return ret, conf else: if item in line: ret, conf = warn_count('syntax', oline, conf, strict=strict, ssh=ssh) return ret, conf # check if the line contains $(foo) executions, and check them executions = re.findall('\$\([^)]+[)]', line) for item in executions: # recurse on check_path ret_check_path, conf = check_path(item[2:-1].strip(), conf, strict=strict) returncode += ret_check_path # recurse on check_secure ret_check_secure, conf = check_secure(item[2:-1].strip(), conf, strict=strict) returncode += ret_check_secure # check for executions using back quotes '`' executions = re.findall('\`[^`]+[`]', line) for item in executions: ret_check_secure, conf = check_secure(item[1:-1].strip(), conf, strict=strict) returncode += ret_check_secure # check if the line contains ${foo=bar}, and check them curly = re.findall('\$\{[^}]+[}]', line) for item in curly: # split to get variable only, and remove last character "}" if re.findall(r'=|\+|\?|\-', item): variable = re.split('=|\+|\?|\-', item, 1) else: variable = item ret_check_path, conf = check_path(variable[1][:-1], conf, strict=strict) returncode += ret_check_path # if unknown commands where found, return 1 and don't execute the line if returncode > 0: return 1, conf # in case the $(foo) or `foo` command passed the above tests elif line.startswith('$(') or line.startswith('`'): return 0, conf # in case ';', '|' or '&' are not forbidden, check if in line lines = [] # corrected by Alojzij Blatnik #48 # test first character if line[0] in ["&", "|", ";"]: start = 1 else: start = 0 # split remaining command line for i in range(1, len(line)): # in case \& or \| or \; don't split it if line[i] in ["&", "|", ";"] and line[i - 1] != "\\": # if there is more && or || skip it if start != i: lines.append(line[start:i]) start = i + 1 # append remaining command line if start != len(line): lines.append(line[start:len(line)]) # remove trailing parenthesis line = re.sub('\)$', '', line) for separate_line in lines: separate_line = " ".join(separate_line.split()) splitcmd = separate_line.strip().split(' ') command = splitcmd[0] if len(splitcmd) > 1: cmdargs = splitcmd else: cmdargs = None # in case of a sudo command, check in sudo_commands list if allowed if command == 'sudo': if type(cmdargs) == list: # allow the -u (user) flag if cmdargs[1] == '-u' and cmdargs: sudocmd = cmdargs[3] else: sudocmd = cmdargs[1] if sudocmd not in conf['sudo_commands'] and cmdargs: ret, conf = warn_count('sudo command', oline, conf, strict=strict, ssh=ssh) return ret, conf # if over SSH, replaced allowed list with the one of overssh if ssh: conf['allowed'] = conf['overssh'] # for all other commands check in allowed list if command not in conf['allowed'] and command: ret, conf = 
warn_count('command', command, conf, strict=strict, ssh=ssh) return ret, conf return 0, conf
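# Illustrative sketch, not part of lshell itself: how the doubled-operator
# pattern built in check_secure() behaves. It only matches a *single* '&'
# between two other characters, so 'ls && pwd' is tolerated even when '&'
# is in the forbidden list, while 'ls & pwd' is flagged.
if __name__ == '__main__':
    pattern = "[^\%s]\%s[^\%s]" % ('&', '&', '&')
    assert re.findall(pattern, "ls && pwd") == []        # '&&' slips through
    assert re.findall(pattern, "ls & pwd") == [' & ']    # lone '&' is caught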
./CrossVul/dataset_final_sorted/CWE-264/py/good_5235_0
crossvul-python_data_good_3771_1
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010-2011 OpenStack, LLC # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Stubouts, mocks and fixtures for the test suite""" import os try: import sendfile SENDFILE_SUPPORTED = True except ImportError: SENDFILE_SUPPORTED = False import routes import webob from glance.api.middleware import context from glance.api.v1 import router import glance.common.client from glance.registry.api import v1 as rserver from glance.tests import utils VERBOSE = False DEBUG = False class FakeRegistryConnection(object): def __init__(self, *args, **kwargs): pass def connect(self): return True def close(self): return True def request(self, method, url, body=None, headers=None): self.req = webob.Request.blank("/" + url.lstrip("/")) self.req.method = method if headers: self.req.headers = headers if body: self.req.body = body def getresponse(self): mapper = routes.Mapper() server = rserver.API(mapper) # NOTE(markwash): we need to pass through context auth information if # we have it. if 'X-Auth-Token' in self.req.headers: api = utils.FakeAuthMiddleware(server) else: api = context.UnauthenticatedContextMiddleware(server) webob_res = self.req.get_response(api) return utils.FakeHTTPResponse(status=webob_res.status_int, headers=webob_res.headers, data=webob_res.body) def stub_out_registry_and_store_server(stubs, base_dir): """ Mocks calls to 127.0.0.1 on 9191 and 9292 for testing so that a real Glance server does not need to be up and running """ class FakeSocket(object): def __init__(self, *args, **kwargs): pass def fileno(self): return 42 class FakeSendFile(object): def __init__(self, req): self.req = req def sendfile(self, o, i, offset, nbytes): os.lseek(i, offset, os.SEEK_SET) prev_len = len(self.req.body) self.req.body += os.read(i, nbytes) return len(self.req.body) - prev_len class FakeGlanceConnection(object): def __init__(self, *args, **kwargs): self.sock = FakeSocket() self.stub_force_sendfile = kwargs.get('stub_force_sendfile', SENDFILE_SUPPORTED) def connect(self): return True def close(self): return True def _clean_url(self, url): #TODO(bcwaldon): Fix the hack that strips off v1 return url.replace('/v1', '', 1) if url.startswith('/v1') else url def putrequest(self, method, url): self.req = webob.Request.blank(self._clean_url(url)) if self.stub_force_sendfile: fake_sendfile = FakeSendFile(self.req) stubs.Set(sendfile, 'sendfile', fake_sendfile.sendfile) self.req.method = method def putheader(self, key, value): self.req.headers[key] = value def endheaders(self): hl = [i.lower() for i in self.req.headers.keys()] assert not ('content-length' in hl and 'transfer-encoding' in hl), \ 'Content-Length and Transfer-Encoding are mutually exclusive' def send(self, data): # send() is called during chunked-transfer encoding, and # data is of the form %x\r\n%s\r\n. Strip off the %x and # only write the actual data in tests. 
self.req.body += data.split("\r\n")[1] def request(self, method, url, body=None, headers=None): self.req = webob.Request.blank(self._clean_url(url)) self.req.method = method if headers: self.req.headers = headers if body: self.req.body = body def getresponse(self): mapper = routes.Mapper() api = context.UnauthenticatedContextMiddleware(router.API(mapper)) res = self.req.get_response(api) # httplib.Response has a read() method...fake it out def fake_reader(): return res.body setattr(res, 'read', fake_reader) return res def fake_get_connection_type(client): """ Returns the proper connection type """ DEFAULT_REGISTRY_PORT = 9191 DEFAULT_API_PORT = 9292 if (client.port == DEFAULT_API_PORT and client.host == '0.0.0.0'): return FakeGlanceConnection elif (client.port == DEFAULT_REGISTRY_PORT and client.host == '0.0.0.0'): return FakeRegistryConnection def fake_image_iter(self): for i in self.source.app_iter: yield i def fake_sendable(self, body): force = getattr(self, 'stub_force_sendfile', None) if force is None: return self._stub_orig_sendable(body) else: if force: assert glance.common.client.SENDFILE_SUPPORTED return force stubs.Set(glance.common.client.BaseClient, 'get_connection_type', fake_get_connection_type) setattr(glance.common.client.BaseClient, '_stub_orig_sendable', glance.common.client.BaseClient._sendable) stubs.Set(glance.common.client.BaseClient, '_sendable', fake_sendable) def stub_out_registry_server(stubs, **kwargs): """ Mocks calls to 127.0.0.1 on 9191 for testing so that a real Glance Registry server does not need to be up and running """ def fake_get_connection_type(client): """ Returns the proper connection type """ DEFAULT_REGISTRY_PORT = 9191 if (client.port == DEFAULT_REGISTRY_PORT and client.host == '0.0.0.0'): return FakeRegistryConnection def fake_image_iter(self): for i in self.response.app_iter: yield i stubs.Set(glance.common.client.BaseClient, 'get_connection_type', fake_get_connection_type)
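# Illustrative sketch, not part of the stubs above: FakeGlanceConnection.send()
# receives chunked-transfer frames shaped like "%x\r\n%s\r\n" and keeps only
# the payload between the CRLFs.
if __name__ == '__main__':
    chunk = "hello"
    frame = "%x\r\n%s\r\n" % (len(chunk), chunk)  # "5\r\nhello\r\n"
    assert frame.split("\r\n")[1] == "hello"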
./CrossVul/dataset_final_sorted/CWE-264/py/good_3771_1
crossvul-python_data_good_3691_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid import routes from keystone import catalog from keystone import exception from keystone import identity from keystone import policy from keystone import token from keystone.common import logging from keystone.common import utils from keystone.common import wsgi LOG = logging.getLogger(__name__) class AdminRouter(wsgi.ComposingRouter): def __init__(self): mapper = routes.Mapper() version_controller = VersionController('admin') mapper.connect('/', controller=version_controller, action='get_version') # Token Operations auth_controller = TokenController() mapper.connect('/tokens', controller=auth_controller, action='authenticate', conditions=dict(method=['POST'])) mapper.connect('/tokens/{token_id}', controller=auth_controller, action='validate_token', conditions=dict(method=['GET'])) mapper.connect('/tokens/{token_id}', controller=auth_controller, action='validate_token_head', conditions=dict(method=['HEAD'])) mapper.connect('/tokens/{token_id}', controller=auth_controller, action='delete_token', conditions=dict(method=['DELETE'])) mapper.connect('/tokens/{token_id}/endpoints', controller=auth_controller, action='endpoints', conditions=dict(method=['GET'])) # Miscellaneous Operations extensions_controller = AdminExtensionsController() mapper.connect('/extensions', controller=extensions_controller, action='get_extensions_info', conditions=dict(method=['GET'])) mapper.connect('/extensions/{extension_alias}', controller=extensions_controller, action='get_extension_info', conditions=dict(method=['GET'])) identity_router = identity.AdminRouter() routers = [identity_router] super(AdminRouter, self).__init__(mapper, routers) class PublicRouter(wsgi.ComposingRouter): def __init__(self): mapper = routes.Mapper() version_controller = VersionController('public') mapper.connect('/', controller=version_controller, action='get_version') # Token Operations auth_controller = TokenController() mapper.connect('/tokens', controller=auth_controller, action='authenticate', conditions=dict(method=['POST'])) # Miscellaneous extensions_controller = PublicExtensionsController() mapper.connect('/extensions', controller=extensions_controller, action='get_extensions_info', conditions=dict(method=['GET'])) mapper.connect('/extensions/{extension_alias}', controller=extensions_controller, action='get_extension_info', conditions=dict(method=['GET'])) identity_router = identity.PublicRouter() routers = [identity_router] super(PublicRouter, self).__init__(mapper, routers) class PublicVersionRouter(wsgi.ComposingRouter): def __init__(self): mapper = routes.Mapper() version_controller = VersionController('public') mapper.connect('/', controller=version_controller, action='get_versions') routers = [] super(PublicVersionRouter, self).__init__(mapper, routers) class AdminVersionRouter(wsgi.ComposingRouter): def __init__(self): mapper = routes.Mapper() version_controller = VersionController('admin') 
mapper.connect('/', controller=version_controller, action='get_versions') routers = [] super(AdminVersionRouter, self).__init__(mapper, routers) class VersionController(wsgi.Application): def __init__(self, version_type): self.catalog_api = catalog.Manager() self.url_key = "%sURL" % version_type super(VersionController, self).__init__() def _get_identity_url(self, context): catalog_ref = self.catalog_api.get_catalog( context=context, user_id=None, tenant_id=None) for region, region_ref in catalog_ref.iteritems(): for service, service_ref in region_ref.iteritems(): if service == 'identity': return service_ref[self.url_key] raise exception.NotImplemented() def _get_versions_list(self, context): """The list of versions is dependent on the context.""" identity_url = self._get_identity_url(context) if not identity_url.endswith('/'): identity_url = identity_url + '/' versions = {} versions['v2.0'] = { "id": "v2.0", "status": "beta", "updated": "2011-11-19T00:00:00Z", "links": [ { "rel": "self", "href": identity_url, }, { "rel": "describedby", "type": "text/html", "href": "http://docs.openstack.org/api/openstack-" "identity-service/2.0/content/" }, { "rel": "describedby", "type": "application/pdf", "href": "http://docs.openstack.org/api/openstack-" "identity-service/2.0/identity-dev-guide-" "2.0.pdf" } ], "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.identity-v2.0" "+json" }, { "base": "application/xml", "type": "application/vnd.openstack.identity-v2.0" "+xml" } ] } return versions def get_versions(self, context): versions = self._get_versions_list(context) return wsgi.render_response(status=(300, 'Multiple Choices'), body={ "versions": { "values": versions.values() } }) def get_version(self, context): versions = self._get_versions_list(context) return wsgi.render_response(body={ "version": versions['v2.0'] }) class NoopController(wsgi.Application): def __init__(self): super(NoopController, self).__init__() def noop(self, context): return {} class TokenController(wsgi.Application): def __init__(self): self.catalog_api = catalog.Manager() self.identity_api = identity.Manager() self.token_api = token.Manager() self.policy_api = policy.Manager() super(TokenController, self).__init__() def authenticate(self, context, auth=None): """Authenticate credentials and return a token. Accept auth as a dict that looks like:: { "auth":{ "passwordCredentials":{ "username":"test_user", "password":"mypass" }, "tenantName":"customer-x" } } In this case, tenant is optional, if not provided the token will be considered "unscoped" and can later be used to get a scoped token. Alternatively, this call accepts auth with only a token and tenant that will return a token that is scoped to that tenant. 
""" token_id = uuid.uuid4().hex if 'passwordCredentials' in auth: username = auth['passwordCredentials'].get('username', '') password = auth['passwordCredentials'].get('password', '') tenant_name = auth.get('tenantName', None) user_id = auth['passwordCredentials'].get('userId', None) if username: user_ref = self.identity_api.get_user_by_name( context=context, user_name=username) if user_ref: user_id = user_ref['id'] # more compat tenant_id = auth.get('tenantId', None) if tenant_name: tenant_ref = self.identity_api.get_tenant_by_name( context=context, tenant_name=tenant_name) if tenant_ref: tenant_id = tenant_ref['id'] try: auth_info = self.identity_api.authenticate(context=context, user_id=user_id, password=password, tenant_id=tenant_id) (user_ref, tenant_ref, metadata_ref) = auth_info # If the user is disabled don't allow them to authenticate if not user_ref.get('enabled', True): LOG.warning('User %s is disabled' % user_id) raise exception.Unauthorized() except AssertionError as e: raise exception.Unauthorized(e.message) token_ref = self.token_api.create_token( context, token_id, dict(id=token_id, user=user_ref, tenant=tenant_ref, metadata=metadata_ref)) if tenant_ref: catalog_ref = self.catalog_api.get_catalog( context=context, user_id=user_ref['id'], tenant_id=tenant_ref['id'], metadata=metadata_ref) else: catalog_ref = {} elif 'token' in auth: token = auth['token'].get('id', None) tenant_name = auth.get('tenantName') # more compat if tenant_name: tenant_ref = self.identity_api.get_tenant_by_name( context=context, tenant_name=tenant_name) tenant_id = tenant_ref['id'] else: tenant_id = auth.get('tenantId', None) try: old_token_ref = self.token_api.get_token(context=context, token_id=token) except exception.NotFound: raise exception.Unauthorized() user_ref = old_token_ref['user'] # If the user is disabled don't allow them to authenticate current_user_ref = self.identity_api.get_user( context=context, user_id=user_ref['id']) if not current_user_ref.get('enabled', True): LOG.warning('User %s is disabled' % user_ref['id']) raise exception.Unauthorized() tenants = self.identity_api.get_tenants_for_user(context, user_ref['id']) if tenant_id: assert tenant_id in tenants tenant_ref = self.identity_api.get_tenant(context=context, tenant_id=tenant_id) if tenant_ref: metadata_ref = self.identity_api.get_metadata( context=context, user_id=user_ref['id'], tenant_id=tenant_ref['id']) catalog_ref = self.catalog_api.get_catalog( context=context, user_id=user_ref['id'], tenant_id=tenant_ref['id'], metadata=metadata_ref) else: metadata_ref = {} catalog_ref = {} token_ref = self.token_api.create_token( context, token_id, dict(id=token_id, user=user_ref, tenant=tenant_ref, metadata=metadata_ref, expires=old_token_ref['expires'])) # TODO(termie): optimize this call at some point and put it into the # the return for metadata # fill out the roles in the metadata roles_ref = [] for role_id in metadata_ref.get('roles', []): roles_ref.append(self.identity_api.get_role(context, role_id)) logging.debug('TOKEN_REF %s', token_ref) return self._format_authenticate(token_ref, roles_ref, catalog_ref) def _get_token_ref(self, context, token_id, belongs_to=None): """Returns a token if a valid one exists. Optionally, limited to a token owned by a specific tenant. 
""" # TODO(termie): this stuff should probably be moved to middleware self.assert_admin(context) token_ref = self.token_api.get_token(context=context, token_id=token_id) if belongs_to: assert token_ref['tenant']['id'] == belongs_to return token_ref # admin only def validate_token_head(self, context, token_id): """Check that a token is valid. Optionally, also ensure that it is owned by a specific tenant. Identical to ``validate_token``, except does not return a response. """ belongs_to = context['query_string'].get("belongsTo") assert self._get_token_ref(context, token_id, belongs_to) # admin only def validate_token(self, context, token_id): """Check that a token is valid. Optionally, also ensure that it is owned by a specific tenant. Returns metadata about the token along any associated roles. """ belongs_to = context['query_string'].get("belongsTo") token_ref = self._get_token_ref(context, token_id, belongs_to) # TODO(termie): optimize this call at some point and put it into the # the return for metadata # fill out the roles in the metadata metadata_ref = token_ref['metadata'] roles_ref = [] for role_id in metadata_ref.get('roles', []): roles_ref.append(self.identity_api.get_role(context, role_id)) # Get a service catalog if belongs_to is not none # This is needed for on-behalf-of requests catalog_ref = None if belongs_to is not None: catalog_ref = self.catalog_api.get_catalog( context=context, user_id=token_ref['user']['id'], tenant_id=token_ref['tenant']['id'], metadata=metadata_ref) return self._format_token(token_ref, roles_ref, catalog_ref) def delete_token(self, context, token_id): """Delete a token, effectively invalidating it for authz.""" # TODO(termie): this stuff should probably be moved to middleware self.assert_admin(context) self.token_api.delete_token(context=context, token_id=token_id) def endpoints(self, context, token_id): """Return a list of endpoints available to the token.""" raise exception.NotImplemented() def _format_authenticate(self, token_ref, roles_ref, catalog_ref): o = self._format_token(token_ref, roles_ref) o['access']['serviceCatalog'] = self._format_catalog(catalog_ref) return o def _format_token(self, token_ref, roles_ref, catalog_ref=None): user_ref = token_ref['user'] metadata_ref = token_ref['metadata'] expires = token_ref['expires'] if expires is not None: expires = utils.isotime(expires) o = {'access': {'token': {'id': token_ref['id'], 'expires': expires, }, 'user': {'id': user_ref['id'], 'name': user_ref['name'], 'username': user_ref['name'], 'roles': roles_ref, 'roles_links': metadata_ref.get('roles_links', []) } } } if 'tenant' in token_ref and token_ref['tenant']: token_ref['tenant']['enabled'] = True o['access']['token']['tenant'] = token_ref['tenant'] if catalog_ref is not None: o['access']['serviceCatalog'] = self._format_catalog(catalog_ref) return o def _format_catalog(self, catalog_ref): """Munge catalogs from internal to output format Internal catalogs look like: {$REGION: { {$SERVICE: { $key1: $value1, ... } } } The legacy api wants them to look like [{'name': $SERVICE[name], 'type': $SERVICE, 'endpoints': [{ 'tenantId': $tenant_id, ... 
'region': $REGION, }], 'endpoints_links': [], }] """ if not catalog_ref: return {} services = {} for region, region_ref in catalog_ref.iteritems(): for service, service_ref in region_ref.iteritems(): new_service_ref = services.get(service, {}) new_service_ref['name'] = service_ref.pop('name') new_service_ref['type'] = service new_service_ref['endpoints_links'] = [] service_ref['region'] = region endpoints_ref = new_service_ref.get('endpoints', []) endpoints_ref.append(service_ref) new_service_ref['endpoints'] = endpoints_ref services[service] = new_service_ref return services.values() class ExtensionsController(wsgi.Application): """Base extensions controller to be extended by public and admin API's.""" def __init__(self, extensions=None): super(ExtensionsController, self).__init__() self.extensions = extensions or {} def get_extensions_info(self, context): return {'extensions': {'values': self.extensions.values()}} def get_extension_info(self, context, extension_alias): try: return {'extension': self.extensions[extension_alias]} except KeyError: raise exception.NotFound(target=extension_alias) class PublicExtensionsController(ExtensionsController): pass class AdminExtensionsController(ExtensionsController): def __init__(self, *args, **kwargs): super(AdminExtensionsController, self).__init__(*args, **kwargs) # TODO(dolph): Extensions should obviously provide this information # themselves, but hardcoding it here allows us to match # the API spec in the short term with minimal complexity. self.extensions['OS-KSADM'] = { 'name': 'Openstack Keystone Admin', 'namespace': 'http://docs.openstack.org/identity/api/ext/' 'OS-KSADM/v1.0', 'alias': 'OS-KSADM', 'updated': '2011-08-19T13:25:27-06:00', 'description': 'Openstack extensions to Keystone v2.0 API ' 'enabling Admin Operations.', 'links': [ { 'rel': 'describedby', # TODO(dolph): link needs to be revised after # bug 928059 merges 'type': 'text/html', 'href': ('https://github.com/openstack/' 'identity-api'), } ] } @logging.fail_gracefully def public_app_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) return PublicRouter() @logging.fail_gracefully def admin_app_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) return AdminRouter() @logging.fail_gracefully def public_version_app_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) return PublicVersionRouter() @logging.fail_gracefully def admin_version_app_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) return AdminVersionRouter()
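# Illustrative sketch, not part of the original module: what
# TokenController._format_catalog() does to a minimal hand-built internal
# catalog (the service name and URL below are made up for the example).
if __name__ == '__main__':
    ctl = TokenController.__new__(TokenController)  # skip __init__/managers
    internal = {'RegionOne': {'identity': {
        'name': 'keystone',
        'publicURL': 'http://localhost:5000/v2.0'}}}
    legacy = ctl._format_catalog(internal)
    # -> [{'name': 'keystone', 'type': 'identity', 'endpoints_links': [],
    #      'endpoints': [{'publicURL': 'http://localhost:5000/v2.0',
    #                     'region': 'RegionOne'}]}]
    assert legacy[0]['type'] == 'identity'
    assert legacy[0]['endpoints'][0]['region'] == 'RegionOne'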
./CrossVul/dataset_final_sorted/CWE-264/py/good_3691_0
crossvul-python_data_good_5538_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Piston Cloud Computing, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Handles all requests relating to compute resources (e.g. guest vms, networking and storage of vms, and compute hosts on which they run).""" import functools import re import time import novaclient import webob.exc from nova import block_device from nova.compute import aggregate_states from nova.compute import instance_types from nova.compute import power_state from nova.compute import task_states from nova.compute import vm_states from nova.db import base from nova import exception from nova import flags import nova.image from nova import log as logging from nova import network from nova.openstack.common import cfg import nova.policy from nova import quota from nova import rpc from nova.scheduler import api as scheduler_api from nova import utils from nova import volume LOG = logging.getLogger(__name__) find_host_timeout_opt = cfg.StrOpt('find_host_timeout', default=30, help='Timeout after NN seconds when looking for a host.') FLAGS = flags.FLAGS FLAGS.register_opt(find_host_timeout_opt) flags.DECLARE('consoleauth_topic', 'nova.consoleauth') def check_instance_state(vm_state=None, task_state=None): """Decorator to check VM and/or task state before entry to API functions. If the instance is in the wrong state, the wrapper will raise an exception. """ if vm_state is not None and not isinstance(vm_state, set): vm_state = set(vm_state) if task_state is not None and not isinstance(task_state, set): task_state = set(task_state) def outer(f): @functools.wraps(f) def inner(self, context, instance, *args, **kw): if vm_state is not None and instance['vm_state'] not in vm_state: raise exception.InstanceInvalidState( attr='vm_state', instance_uuid=instance['uuid'], state=instance['vm_state'], method=f.__name__) if (task_state is not None and instance['task_state'] not in task_state): raise exception.InstanceInvalidState( attr='task_state', instance_uuid=instance['uuid'], state=instance['task_state'], method=f.__name__) return f(self, context, instance, *args, **kw) return inner return outer def wrap_check_policy(func): """Check corresponding policy prior of wrapped method to execution""" @functools.wraps(func) def wrapped(self, context, target, *args, **kwargs): check_policy(context, func.__name__, target) return func(self, context, target, *args, **kwargs) return wrapped def check_policy(context, action, target): _action = 'compute:%s' % action nova.policy.enforce(context, _action, target) class BaseAPI(base.Base): """Base API class.""" def __init__(self, **kwargs): super(BaseAPI, self).__init__(**kwargs) def _cast_or_call_compute_message(self, rpc_method, compute_method, context, instance=None, host=None, params=None): """Generic handler for RPC casts and calls to compute. 
:param rpc_method: RPC method to use (rpc.call or rpc.cast) :param compute_method: Compute manager method to call :param context: RequestContext of caller :param instance: The instance object to use to find host to send to Can be None to not include instance_uuid in args :param host: Optional host to send to instead of instance['host'] Must be specified if 'instance' is None :param params: Optional dictionary of arguments to be passed to the compute worker :returns: None """ if not params: params = {} if not host: if not instance: raise exception.Error(_("No compute host specified")) host = instance['host'] if not host: raise exception.Error(_("Unable to find host for " "Instance %s") % instance['uuid']) queue = self.db.queue_get_for(context, FLAGS.compute_topic, host) if instance: params['instance_uuid'] = instance['uuid'] kwargs = {'method': compute_method, 'args': params} return rpc_method(context, queue, kwargs) def _cast_compute_message(self, *args, **kwargs): """Generic handler for RPC casts to compute.""" self._cast_or_call_compute_message(rpc.cast, *args, **kwargs) def _call_compute_message(self, *args, **kwargs): """Generic handler for RPC calls to compute.""" return self._cast_or_call_compute_message(rpc.call, *args, **kwargs) @staticmethod def _cast_scheduler_message(context, args): """Generic handler for RPC calls to the scheduler.""" rpc.cast(context, FLAGS.scheduler_topic, args) class API(BaseAPI): """API for interacting with the compute manager.""" def __init__(self, image_service=None, network_api=None, volume_api=None, **kwargs): self.image_service = (image_service or nova.image.get_default_image_service()) self.network_api = network_api or network.API() self.volume_api = volume_api or volume.API() self.sgh = utils.import_object(FLAGS.security_group_handler) super(API, self).__init__(**kwargs) def _check_injected_file_quota(self, context, injected_files): """Enforce quota limits on injected files. Raises a QuotaError if any limit is exceeded. 
""" if injected_files is None: return limit = quota.allowed_injected_files(context, len(injected_files)) if len(injected_files) > limit: raise exception.QuotaError(code="OnsetFileLimitExceeded") path_limit = quota.allowed_injected_file_path_bytes(context) for path, content in injected_files: if len(path) > path_limit: raise exception.QuotaError(code="OnsetFilePathLimitExceeded") content_limit = quota.allowed_injected_file_content_bytes( context, len(content)) if len(content) > content_limit: code = "OnsetFileContentLimitExceeded" raise exception.QuotaError(code=code) def _check_metadata_properties_quota(self, context, metadata=None): """Enforce quota limits on metadata properties.""" if not metadata: metadata = {} num_metadata = len(metadata) quota_metadata = quota.allowed_metadata_items(context, num_metadata) if quota_metadata < num_metadata: pid = context.project_id msg = _("Quota exceeded for %(pid)s, tried to set " "%(num_metadata)s metadata properties") % locals() LOG.warn(msg) raise exception.QuotaError(code="MetadataLimitExceeded") # Because metadata is stored in the DB, we hard-code the size limits # In future, we may support more variable length strings, so we act # as if this is quota-controlled for forwards compatibility for k, v in metadata.iteritems(): if len(k) > 255 or len(v) > 255: pid = context.project_id msg = _("Quota exceeded for %(pid)s, metadata property " "key or value too long") % locals() LOG.warn(msg) raise exception.QuotaError(code="MetadataLimitExceeded") def _check_requested_networks(self, context, requested_networks): """ Check if the networks requested belongs to the project and the fixed IP address for each network provided is within same the network block """ if requested_networks is None: return self.network_api.validate_networks(context, requested_networks) def _create_instance(self, context, instance_type, image_href, kernel_id, ramdisk_id, min_count, max_count, display_name, display_description, key_name, key_data, security_group, availability_zone, user_data, metadata, injected_files, admin_password, access_ip_v4, access_ip_v6, requested_networks, config_drive, block_device_mapping, auto_disk_config, reservation_id=None, create_instance_here=False, scheduler_hints=None): """Verify all the input parameters regardless of the provisioning strategy being performed and schedule the instance(s) for creation.""" if not metadata: metadata = {} if not display_description: display_description = '' if not security_group: security_group = 'default' if not instance_type: instance_type = instance_types.get_default_instance_type() if not min_count: min_count = 1 if not max_count: max_count = min_count if not metadata: metadata = {} block_device_mapping = block_device_mapping or [] num_instances = quota.allowed_instances(context, max_count, instance_type) if num_instances < min_count: pid = context.project_id if num_instances <= 0: msg = _("Cannot run any more instances of this type.") else: msg = (_("Can only run %s more instances of this type.") % num_instances) LOG.warn(_("Quota exceeded for %(pid)s," " tried to run %(min_count)s instances. 
" + msg) % locals()) raise exception.QuotaError(code="InstanceLimitExceeded") self._check_metadata_properties_quota(context, metadata) self._check_injected_file_quota(context, injected_files) self._check_requested_networks(context, requested_networks) (image_service, image_id) = nova.image.get_image_service(context, image_href) image = image_service.show(context, image_id) if instance_type['memory_mb'] < int(image.get('min_ram') or 0): raise exception.InstanceTypeMemoryTooSmall() if instance_type['root_gb'] < int(image.get('min_disk') or 0): raise exception.InstanceTypeDiskTooSmall() config_drive_id = None if config_drive and config_drive is not True: # config_drive is volume id config_drive, config_drive_id = None, config_drive os_type = None if 'properties' in image and 'os_type' in image['properties']: os_type = image['properties']['os_type'] architecture = None if 'properties' in image and 'arch' in image['properties']: architecture = image['properties']['arch'] vm_mode = None if 'properties' in image and 'vm_mode' in image['properties']: vm_mode = image['properties']['vm_mode'] # If instance doesn't have auto_disk_config overridden by request, use # whatever the image indicates if auto_disk_config is None: if ('properties' in image and 'auto_disk_config' in image['properties']): auto_disk_config = utils.bool_from_str( image['properties']['auto_disk_config']) if kernel_id is None: kernel_id = image['properties'].get('kernel_id', None) if ramdisk_id is None: ramdisk_id = image['properties'].get('ramdisk_id', None) # FIXME(sirp): is there a way we can remove null_kernel? # No kernel and ramdisk for raw images if kernel_id == str(FLAGS.null_kernel): kernel_id = None ramdisk_id = None LOG.debug(_("Creating a raw instance")) # Make sure we have access to kernel and ramdisk (if not raw) LOG.debug(_("Using Kernel=%(kernel_id)s, Ramdisk=%(ramdisk_id)s") % locals()) if kernel_id: image_service.show(context, kernel_id) if ramdisk_id: image_service.show(context, ramdisk_id) if config_drive_id: image_service.show(context, config_drive_id) self.ensure_default_security_group(context) if key_data is None and key_name: key_pair = self.db.key_pair_get(context, context.user_id, key_name) key_data = key_pair['public_key'] if reservation_id is None: reservation_id = utils.generate_uid('r') root_device_name = block_device.properties_root_device_name( image['properties']) # NOTE(vish): We have a legacy hack to allow admins to specify hosts # via az using az:host. It might be nice to expose an # api to specify specific hosts to force onto, but for # now it just supports this legacy hack. 
host = None if availability_zone: availability_zone, _x, host = availability_zone.partition(':') if not availability_zone: availability_zone = FLAGS.default_schedule_zone if context.is_admin and host: filter_properties = {'force_hosts': [host]} else: filter_properties = {} filter_properties['scheduler_hints'] = scheduler_hints base_options = { 'reservation_id': reservation_id, 'image_ref': image_href, 'kernel_id': kernel_id or '', 'ramdisk_id': ramdisk_id or '', 'power_state': power_state.NOSTATE, 'vm_state': vm_states.BUILDING, 'config_drive_id': config_drive_id or '', 'config_drive': config_drive or '', 'user_id': context.user_id, 'project_id': context.project_id, 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()), 'instance_type_id': instance_type['id'], 'memory_mb': instance_type['memory_mb'], 'vcpus': instance_type['vcpus'], 'root_gb': instance_type['root_gb'], 'ephemeral_gb': instance_type['ephemeral_gb'], 'display_name': display_name, 'display_description': display_description, 'user_data': user_data or '', 'key_name': key_name, 'key_data': key_data, 'locked': False, 'metadata': metadata, 'access_ip_v4': access_ip_v4, 'access_ip_v6': access_ip_v6, 'availability_zone': availability_zone, 'os_type': os_type, 'architecture': architecture, 'vm_mode': vm_mode, 'root_device_name': root_device_name, 'progress': 0, 'auto_disk_config': auto_disk_config} LOG.debug(_("Going to run %s instances...") % num_instances) # Validate the correct devices have been specified for bdm in block_device_mapping: # NOTE(vish): For now, just make sure the volumes are accessible. snapshot_id = bdm.get('snapshot_id') volume_id = bdm.get('volume_id') if volume_id is not None: try: self.volume_api.get(context, volume_id) except Exception: raise exception.InvalidBDMVolume(id=volume_id) elif snapshot_id is not None: try: self.volume_api.get_snapshot(context, snapshot_id) except Exception: raise exception.InvalidBDMSnapshot(id=snapshot_id) if create_instance_here: instance = self.create_db_entry_for_new_instance( context, instance_type, image, base_options, security_group, block_device_mapping) # Tells scheduler we created the instance already. base_options['uuid'] = instance['uuid'] rpc_method = rpc.cast else: # We need to wait for the scheduler to create the instance # DB entries, because the instance *could* be # created in # a child zone. rpc_method = rpc.call # TODO(comstud): We should use rpc.multicall when we can # retrieve the full instance dictionary from the scheduler. # Otherwise, we could exceed the AMQP max message size limit. # This would require the schedulers' schedule_run_instances # methods to return an iterator vs a list. instances = self._schedule_run_instance( rpc_method, context, base_options, instance_type, availability_zone, injected_files, admin_password, image, num_instances, requested_networks, block_device_mapping, security_group, filter_properties) if create_instance_here: return ([instance], reservation_id) return (instances, reservation_id) @staticmethod def _volume_size(instance_type, virtual_name): size = 0 if virtual_name == 'swap': size = instance_type.get('swap', 0) elif block_device.is_ephemeral(virtual_name): num = block_device.ephemeral_num(virtual_name) # TODO(yamahata): ephemeralN where N > 0 # Only ephemeral0 is allowed for now because InstanceTypes # table only allows single local disk, ephemeral_gb. # In order to enhance it, we need to add a new columns to # instance_types table. 
if num > 0: return 0 size = instance_type.get('ephemeral_gb') return size def _update_image_block_device_mapping(self, elevated_context, instance_type, instance_id, mappings): """tell vm driver to create ephemeral/swap device at boot time by updating BlockDeviceMapping """ instance_type = (instance_type or instance_types.get_default_instance_type()) for bdm in block_device.mappings_prepend_dev(mappings): LOG.debug(_("bdm %s"), bdm) virtual_name = bdm['virtual'] if virtual_name == 'ami' or virtual_name == 'root': continue if not block_device.is_swap_or_ephemeral(virtual_name): continue size = self._volume_size(instance_type, virtual_name) if size == 0: continue values = { 'instance_id': instance_id, 'device_name': bdm['device'], 'virtual_name': virtual_name, 'volume_size': size} self.db.block_device_mapping_update_or_create(elevated_context, values) def _update_block_device_mapping(self, elevated_context, instance_type, instance_id, block_device_mapping): """tell vm driver to attach volume at boot time by updating BlockDeviceMapping """ LOG.debug(_("block_device_mapping %s"), block_device_mapping) for bdm in block_device_mapping: assert 'device_name' in bdm values = {'instance_id': instance_id} for key in ('device_name', 'delete_on_termination', 'virtual_name', 'snapshot_id', 'volume_id', 'volume_size', 'no_device'): values[key] = bdm.get(key) virtual_name = bdm.get('virtual_name') if (virtual_name is not None and block_device.is_swap_or_ephemeral(virtual_name)): size = self._volume_size(instance_type, virtual_name) if size == 0: continue values['volume_size'] = size # NOTE(yamahata): NoDevice eliminates devices defined in image # files by command line option. # (--block-device-mapping) if virtual_name == 'NoDevice': values['no_device'] = True for k in ('delete_on_termination', 'volume_id', 'snapshot_id', 'volume_id', 'volume_size', 'virtual_name'): values[k] = None self.db.block_device_mapping_update_or_create(elevated_context, values) #NOTE(bcwaldon): No policy check since this is only used by scheduler and # the compute api. That should probably be cleaned up, though. def create_db_entry_for_new_instance(self, context, instance_type, image, base_options, security_group, block_device_mapping): """Create an entry in the DB for this new instance, including any related table updates (such as security group, etc). This is called by the scheduler after a location for the instance has been determined. 
""" elevated = context.elevated() if security_group is None: security_group = ['default'] if not isinstance(security_group, list): security_group = [security_group] security_groups = [] for security_group_name in security_group: group = self.db.security_group_get_by_name(context, context.project_id, security_group_name) security_groups.append(group['id']) base_options.setdefault('launch_index', 0) instance = self.db.instance_create(context, base_options) instance_id = instance['id'] instance_uuid = instance['uuid'] for security_group_id in security_groups: self.db.instance_add_security_group(elevated, instance_uuid, security_group_id) # BlockDeviceMapping table self._update_image_block_device_mapping(elevated, instance_type, instance_id, image['properties'].get('mappings', [])) self._update_block_device_mapping(elevated, instance_type, instance_id, image['properties'].get('block_device_mapping', [])) # override via command line option self._update_block_device_mapping(elevated, instance_type, instance_id, block_device_mapping) # Set sane defaults if not specified updates = {} display_name = instance.get('display_name') if display_name is None: display_name = self._default_display_name(instance_id) hostname = instance.get('hostname') if hostname is None: hostname = display_name updates['display_name'] = display_name updates['hostname'] = utils.sanitize_hostname(hostname) updates['vm_state'] = vm_states.BUILDING updates['task_state'] = task_states.SCHEDULING if (image['properties'].get('mappings', []) or image['properties'].get('block_device_mapping', []) or block_device_mapping): updates['shutdown_terminate'] = False instance = self.update(context, instance, **updates) return instance def _default_display_name(self, instance_id): return "Server %s" % instance_id def _schedule_run_instance(self, rpc_method, context, base_options, instance_type, availability_zone, injected_files, admin_password, image, num_instances, requested_networks, block_device_mapping, security_group, filter_properties): """Send a run_instance request to the schedulers for processing.""" pid = context.project_id uid = context.user_id LOG.debug(_("Sending create to scheduler for %(pid)s/%(uid)s's") % locals()) request_spec = { 'image': utils.to_primitive(image), 'instance_properties': base_options, 'instance_type': instance_type, 'num_instances': num_instances, 'block_device_mapping': block_device_mapping, 'security_group': security_group, } return rpc_method(context, FLAGS.scheduler_topic, {"method": "run_instance", "args": {"topic": FLAGS.compute_topic, "request_spec": request_spec, "admin_password": admin_password, "injected_files": injected_files, "requested_networks": requested_networks, "is_first_time": True, "filter_properties": filter_properties}}) def _check_create_policies(self, context, availability_zone, requested_networks, block_device_mapping): """Check policies for create().""" target = {'project_id': context.project_id, 'user_id': context.user_id, 'availability_zone': availability_zone} check_policy(context, 'create', target) if requested_networks: check_policy(context, 'create:attach_network', target) if block_device_mapping: check_policy(context, 'create:attach_volume', target) def create(self, context, instance_type, image_href, kernel_id=None, ramdisk_id=None, min_count=None, max_count=None, display_name=None, display_description=None, key_name=None, key_data=None, security_group=None, availability_zone=None, user_data=None, metadata=None, injected_files=None, admin_password=None, 
block_device_mapping=None, access_ip_v4=None, access_ip_v6=None, requested_networks=None, config_drive=None, auto_disk_config=None, scheduler_hints=None): """ Provision instances, sending instance information to the scheduler. The scheduler will determine where the instance(s) go and will handle creating the DB entries. Returns a tuple of (instances, reservation_id) where instances could be 'None' or a list of instance dicts depending on if we waited for information from the scheduler or not. """ self._check_create_policies(context, availability_zone, requested_networks, block_device_mapping) # We can create the DB entry for the instance here if we're # only going to create 1 instance. # This speeds up API responses for builds # as we don't need to wait for the scheduler. create_instance_here = max_count == 1 (instances, reservation_id) = self._create_instance( context, instance_type, image_href, kernel_id, ramdisk_id, min_count, max_count, display_name, display_description, key_name, key_data, security_group, availability_zone, user_data, metadata, injected_files, admin_password, access_ip_v4, access_ip_v6, requested_networks, config_drive, block_device_mapping, auto_disk_config, create_instance_here=create_instance_here, scheduler_hints=scheduler_hints) if create_instance_here or instances is None: return (instances, reservation_id) inst_ret_list = [] for instance in instances: if instance.get('_is_precooked', False): inst_ret_list.append(instance) else: # Scheduler only gives us the 'id'. We need to pull # in the created instances from the DB instance = self.db.instance_get(context, instance['id']) inst_ret_list.append(dict(instance.iteritems())) return (inst_ret_list, reservation_id) def ensure_default_security_group(self, context): """Ensure that a context has a security group. Creates a security group for the security context if it does not already exist. :param context: the security context """ try: self.db.security_group_get_by_name(context, context.project_id, 'default') except exception.NotFound: values = {'name': 'default', 'description': 'default', 'user_id': context.user_id, 'project_id': context.project_id} self.db.security_group_create(context, values) self.sgh.trigger_security_group_create_refresh(context, values) def trigger_security_group_rules_refresh(self, context, security_group_id): """Called when a rule is added to or removed from a security_group.""" security_group = self.db.security_group_get(context, security_group_id) hosts = set() for instance in security_group['instances']: if instance['host'] is not None: hosts.add(instance['host']) for host in hosts: rpc.cast(context, self.db.queue_get_for(context, FLAGS.compute_topic, host), {"method": "refresh_security_group_rules", "args": {"security_group_id": security_group.id}}) def trigger_security_group_members_refresh(self, context, group_ids): """Called when a security group gains a new or loses a member. Sends an update request to each compute node for whom this is relevant. """ # First, we get the security group rules that reference these groups as # the grantee.. security_group_rules = set() for group_id in group_ids: security_group_rules.update( self.db.security_group_rule_get_by_security_group_grantee( context, group_id)) # ..then we distill the security groups to which they belong.. 
security_groups = set() for rule in security_group_rules: security_group = self.db.security_group_get( context, rule['parent_group_id']) security_groups.add(security_group) # ..then we find the instances that are members of these groups.. instances = set() for security_group in security_groups: for instance in security_group['instances']: instances.add(instance) # ...then we find the hosts where they live... hosts = set() for instance in instances: if instance['host']: hosts.add(instance['host']) # ...and finally we tell these nodes to refresh their view of this # particular security group. for host in hosts: rpc.cast(context, self.db.queue_get_for(context, FLAGS.compute_topic, host), {"method": "refresh_security_group_members", "args": {"security_group_id": group_id}}) def trigger_provider_fw_rules_refresh(self, context): """Called when a rule is added/removed from a provider firewall""" hosts = [x['host'] for (x, idx) in self.db.service_get_all_compute_sorted(context)] for host in hosts: rpc.cast(context, self.db.queue_get_for(context, FLAGS.compute_topic, host), {'method': 'refresh_provider_fw_rules', 'args': {}}) def _is_security_group_associated_with_server(self, security_group, instance_uuid): """Check if the security group is already associated with the instance. If Yes, return True. """ if not security_group: return False instances = security_group.get('instances') if not instances: return False for inst in instances: if (instance_uuid == inst['uuid']): return True return False @wrap_check_policy def add_security_group(self, context, instance, security_group_name): """Add security group to the instance""" security_group = self.db.security_group_get_by_name(context, context.project_id, security_group_name) instance_uuid = instance['uuid'] #check if the security group is associated with the server if self._is_security_group_associated_with_server(security_group, instance_uuid): raise exception.SecurityGroupExistsForInstance( security_group_id=security_group['id'], instance_id=instance_uuid) #check if the instance is in running state if instance['power_state'] != power_state.RUNNING: raise exception.InstanceNotRunning(instance_id=instance_uuid) self.db.instance_add_security_group(context.elevated(), instance_uuid, security_group['id']) params = {"security_group_id": security_group['id']} # NOTE(comstud): No instance_uuid argument to this compute manager # call self._cast_compute_message('refresh_security_group_rules', context, host=instance['host'], params=params) @wrap_check_policy def remove_security_group(self, context, instance, security_group_name): """Remove the security group associated with the instance""" security_group = self.db.security_group_get_by_name(context, context.project_id, security_group_name) instance_uuid = instance['uuid'] #check if the security group is associated with the server if not self._is_security_group_associated_with_server(security_group, instance_uuid): raise exception.SecurityGroupNotExistsForInstance( security_group_id=security_group['id'], instance_id=instance_uuid) #check if the instance is in running state if instance['power_state'] != power_state.RUNNING: raise exception.InstanceNotRunning(instance_id=instance_uuid) self.db.instance_remove_security_group(context.elevated(), instance_uuid, security_group['id']) params = {"security_group_id": security_group['id']} # NOTE(comstud): No instance_uuid argument to this compute manager # call self._cast_compute_message('refresh_security_group_rules', context, host=instance['host'], params=params) 
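    # Illustrative sketch, not part of the original module: the membership
    # test that _is_security_group_associated_with_server() performs for
    # add/remove_security_group above, paraphrased on a hand-built group
    # dict (uuids here are made up for the example):
    #
    #   group = {'instances': [{'uuid': 'abc'}]}
    #   uuids = [i['uuid'] for i in group.get('instances') or []]
    #   'abc' in uuids   # True  -> add_security_group raises "exists"
    #   'xyz' in uuids   # False -> remove_security_group raises "not exists"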
@wrap_check_policy def update(self, context, instance, **kwargs): """Updates the instance in the datastore. :param context: The security context :param instance: The instance to update :param kwargs: All additional keyword args are treated as data fields of the instance to be updated :returns: None """ rv = self.db.instance_update(context, instance["id"], kwargs) return dict(rv.iteritems()) @wrap_check_policy @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF, vm_states.ERROR]) def soft_delete(self, context, instance): """Terminate an instance.""" LOG.debug(_('Going to try to soft delete instance'), instance=instance) if instance['disable_terminate']: return # NOTE(jerdfelt): The compute daemon handles reclaiming instances # that are in soft delete. If there is no host assigned, there is # no daemon to reclaim, so delete it immediately. host = instance['host'] if host: self.update(context, instance, vm_state=vm_states.SOFT_DELETE, task_state=task_states.POWERING_OFF, deleted_at=utils.utcnow()) self._cast_compute_message('power_off_instance', context, instance) else: LOG.warning(_('No host for instance, deleting immediately'), instance=instance) try: self.db.instance_destroy(context, instance['id']) except exception.InstanceNotFound: # NOTE(comstud): Race condition. Instance already gone. pass def _delete(self, context, instance): host = instance['host'] try: if host: self.update(context, instance, task_state=task_states.DELETING, progress=0) self._cast_compute_message('terminate_instance', context, instance) else: self.db.instance_destroy(context, instance['id']) except exception.InstanceNotFound: # NOTE(comstud): Race condition. Instance already gone. pass # NOTE(jerdfelt): The API implies that only ACTIVE and ERROR are # allowed but the EC2 API appears to allow from RESCUED and STOPPED # too @wrap_check_policy @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.BUILDING, vm_states.ERROR, vm_states.RESCUED, vm_states.SHUTOFF, vm_states.STOPPED]) def delete(self, context, instance): """Terminate an instance.""" LOG.debug(_("Going to try to terminate instance"), instance=instance) if instance['disable_terminate']: return self._delete(context, instance) @wrap_check_policy @check_instance_state(vm_state=[vm_states.SOFT_DELETE]) def restore(self, context, instance): """Restore a previously deleted (but not reclaimed) instance.""" self.update(context, instance, vm_state=vm_states.ACTIVE, task_state=None, deleted_at=None) host = instance['host'] if host: self.update(context, instance, task_state=task_states.POWERING_ON) self._cast_compute_message('power_on_instance', context, instance) @wrap_check_policy @check_instance_state(vm_state=[vm_states.SOFT_DELETE]) def force_delete(self, context, instance): """Force delete a previously deleted (but not reclaimed) instance.""" self._delete(context, instance) @wrap_check_policy @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF, vm_states.RESCUED], task_state=[None, task_states.RESIZE_VERIFY]) def stop(self, context, instance, do_cast=True): """Stop an instance.""" instance_uuid = instance["uuid"] LOG.debug(_("Going to try to stop instance"), instance=instance) self.update(context, instance, vm_state=vm_states.ACTIVE, task_state=task_states.STOPPING, terminated_at=utils.utcnow(), progress=0) rpc_method = rpc.cast if do_cast else rpc.call self._cast_or_call_compute_message(rpc_method, 'stop_instance', context, instance) @wrap_check_policy @check_instance_state(vm_state=[vm_states.STOPPED, vm_states.SHUTOFF]) def 
    @wrap_check_policy
    @check_instance_state(vm_state=[vm_states.STOPPED, vm_states.SHUTOFF])
    def start(self, context, instance):
        """Start an instance."""
        vm_state = instance["vm_state"]
        instance_uuid = instance["uuid"]
        LOG.debug(_("Going to try to start instance"), instance=instance)

        if vm_state == vm_states.SHUTOFF:
            if instance['shutdown_terminate']:
                LOG.warning(_("Instance %(instance_uuid)s is not "
                              "stopped. (%(vm_state)s)") % locals())
                return

            # NOTE(yamahata): nova compute doesn't reap instances
            # which initiated shutdown themselves. So reap it here.
            self.stop(context, instance, do_cast=False)

        self.update(context,
                    instance,
                    vm_state=vm_states.STOPPED,
                    task_state=task_states.STARTING)

        # TODO(yamahata): injected_files isn't supported right now.
        #                 It is used only for osapi. not for ec2 api.
        #                 availability_zone isn't used by run_instance.
        self._cast_compute_message('start_instance', context, instance)

    #NOTE(bcwaldon): no policy check here since it should be rolled in to
    # search_opts in get_all
    def get_active_by_window(self, context, begin, end=None, project_id=None):
        """Get instances that were continuously active over a window."""
        return self.db.instance_get_active_by_window(context, begin, end,
                                                     project_id)

    #NOTE(bcwaldon): this doesn't really belong in this class
    def get_instance_type(self, context, instance_type_id):
        """Get an instance type by instance type id."""
        return instance_types.get_instance_type(instance_type_id)

    def get(self, context, instance_id):
        """Get a single instance with the given instance_id."""
        # NOTE(ameade): we still need to support integer ids for ec2
        if utils.is_uuid_like(instance_id):
            instance = self.db.instance_get_by_uuid(context, instance_id)
        else:
            instance = self.db.instance_get(context, instance_id)

        check_policy(context, 'get', instance)

        inst = dict(instance.iteritems())
        # NOTE(comstud): Doesn't get returned with iteritems
        inst['name'] = instance['name']
        return inst

    def get_all(self, context, search_opts=None, sort_key='created_at',
                sort_dir='desc'):
        """Get all instances filtered by one of the given parameters.

        If there is no filter and the context is an admin, it will retrieve
        all instances in the system.

        Deleted instances will be returned by default, unless there is a
        search option that says otherwise.

        The results will be returned sorted in the order specified by the
        'sort_dir' parameter using the key specified in the 'sort_key'
        parameter.
        """

        #TODO(bcwaldon): determine the best argument for target here
        target = {
            'project_id': context.project_id,
            'user_id': context.user_id,
        }

        check_policy(context, "get_all", target)

        if search_opts is None:
            search_opts = {}

        LOG.debug(_("Searching by: %s") % str(search_opts))

        # Fixups for the DB call
        filters = {}

        def _remap_flavor_filter(flavor_id):
            try:
                instance_type = instance_types.get_instance_type_by_flavor_id(
                        flavor_id)
            except exception.FlavorNotFound:
                raise ValueError()

            filters['instance_type_id'] = instance_type['id']

        def _remap_fixed_ip_filter(fixed_ip):
            # Turn fixed_ip into a regexp match. Since '.' matches
            # any character, we need to use regexp escaping for it.
            filters['ip'] = '^%s$' % fixed_ip.replace('.', '\\.')

        # search_option to filter_name mapping.
        filter_mapping = {
                'image': 'image_ref',
                'name': 'display_name',
                'instance_name': 'name',
                'tenant_id': 'project_id',
                'flavor': _remap_flavor_filter,
                'fixed_ip': _remap_fixed_ip_filter}

        # copy from search_opts, doing various remappings as necessary
        for opt, value in search_opts.iteritems():
            # Do remappings.
            # Values not in the filter_mapping table are copied as-is.
            # If the remapping is a string, it is the filter_name to use;
            # if it is callable, it performs its own remapping.
            try:
                remap_object = filter_mapping[opt]
            except KeyError:
                filters[opt] = value
            else:
                # Remaps are strings to translate to, or functions to call
                # to do the translating as defined by the table above.
                if isinstance(remap_object, basestring):
                    filters[remap_object] = value
                else:
                    try:
                        remap_object(value)

                    # We already know we can't match the filter, so
                    # return an empty list
                    except ValueError:
                        return []

        inst_models = self._get_instances_by_filters(context, filters,
                                                     sort_key, sort_dir)

        # Convert the models to dictionaries
        instances = []
        for inst_model in inst_models:
            instance = dict(inst_model.iteritems())
            # NOTE(comstud): Doesn't get returned by iteritems
            instance['name'] = inst_model['name']
            instances.append(instance)

        return instances

    def _get_instances_by_filters(self, context, filters, sort_key, sort_dir):
        if 'ip6' in filters or 'ip' in filters:
            res = self.network_api.get_instance_uuids_by_ip_filter(context,
                                                                   filters)
            # NOTE(jkoelker) It is possible that we will get the same
            #                instance uuid twice (one for ipv4 and ipv6)
            uuids = set([r['instance_uuid'] for r in res])
            filters['uuid'] = uuids

        return self.db.instance_get_all_by_filters(context, filters,
                                                   sort_key, sort_dir)

    @wrap_check_policy
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF])
    def backup(self, context, instance, name, backup_type, rotation,
               extra_properties=None):
        """Backup the given instance.

        :param instance: nova.db.sqlalchemy.models.Instance
        :param name: name of the backup
        :param backup_type: 'daily' or 'weekly'
        :param rotation: int representing how many backups to keep around;
            None if rotation shouldn't be used (as in the case of snapshots)
        :param extra_properties: dict of extra image properties to include
        """
        recv_meta = self._create_image(context, instance, name, 'backup',
                            backup_type=backup_type, rotation=rotation,
                            extra_properties=extra_properties)
        return recv_meta

    @wrap_check_policy
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF])
    def snapshot(self, context, instance, name, extra_properties=None):
        """Snapshot the given instance.

        :param instance: nova.db.sqlalchemy.models.Instance
        :param name: name of the snapshot
        :param extra_properties: dict of extra image properties to include

        :returns: A dict containing image metadata
        """
        return self._create_image(context, instance, name, 'snapshot',
                                  extra_properties=extra_properties)
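    # Illustrative sketch (not part of the API): the difference between the
    # two image-creation entry points above, assuming 'api', 'ctxt' and
    # 'instance' as in the earlier examples:
    #
    #     # One-off snapshot; no rotation, min_ram/min_disk derived from
    #     # the source image:
    #     meta = api.snapshot(ctxt, instance, 'my-snap')
    #
    #     # Rotated backup; keep at most 7 'daily' backups around:
    #     meta = api.backup(ctxt, instance, 'nightly', 'daily', rotation=7)
    #
    # Both delegate to _create_image() below, differing only in the
    # task_state that is set and the image properties sent to the image
    # service.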
    def _create_image(self, context, instance, name, image_type,
                      backup_type=None, rotation=None, extra_properties=None):
        """Create snapshot or backup for an instance on this host.

        :param context: security context
        :param instance: nova.db.sqlalchemy.models.Instance
        :param name: string for name of the snapshot
        :param image_type: snapshot | backup
        :param backup_type: daily | weekly
        :param rotation: int representing how many backups to keep around;
            None if rotation shouldn't be used (as in the case of snapshots)
        :param extra_properties: dict of extra image properties to include
        """
        instance_uuid = instance['uuid']

        if image_type == "snapshot":
            task_state = task_states.IMAGE_SNAPSHOT
        elif image_type == "backup":
            task_state = task_states.IMAGE_BACKUP
        else:
            raise Exception(_('Image type not recognized: %s') % image_type)

        self.db.instance_test_and_set(
                context, instance_uuid, 'task_state', [None], task_state)

        properties = {
            'instance_uuid': instance_uuid,
            'user_id': str(context.user_id),
            'image_type': image_type,
        }

        sent_meta = {'name': name, 'is_public': False}

        if image_type == 'backup':
            properties['backup_type'] = backup_type

        elif image_type == 'snapshot':
            min_ram, min_disk = self._get_minram_mindisk_params(context,
                                                                instance)
            if min_ram is not None:
                sent_meta['min_ram'] = min_ram
            if min_disk is not None:
                sent_meta['min_disk'] = min_disk

        properties.update(extra_properties or {})
        sent_meta['properties'] = properties

        recv_meta = self.image_service.create(context, sent_meta)
        params = {'image_id': recv_meta['id'], 'image_type': image_type,
                  'backup_type': backup_type, 'rotation': rotation}
        self._cast_compute_message('snapshot_instance', context, instance,
                                   params=params)
        return recv_meta

    def _get_minram_mindisk_params(self, context, instance):
        try:
            #try to get source image of the instance
            orig_image = self.image_service.show(context,
                                                 instance['image_ref'])
        except exception.ImageNotFound:
            return None, None

        #disk format of vhd is non-shrinkable
        if orig_image.get('disk_format') == 'vhd':
            min_ram = instance['instance_type']['memory_mb']
            min_disk = instance['instance_type']['root_gb']
        else:
            #set new image values to the original image values
            min_ram = orig_image.get('min_ram')
            min_disk = orig_image.get('min_disk')

        return min_ram, min_disk

    @wrap_check_policy
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF,
                                    vm_states.RESCUED],
                          task_state=[None, task_states.RESIZE_VERIFY])
    def reboot(self, context, instance, reboot_type):
        """Reboot the given instance."""
        state = {'SOFT': task_states.REBOOTING,
                 'HARD': task_states.REBOOTING_HARD}[reboot_type]
        self.update(context,
                    instance,
                    vm_state=vm_states.ACTIVE,
                    task_state=state)
        self._cast_compute_message('reboot_instance', context, instance,
                                   params={'reboot_type': reboot_type})

    def _validate_image_href(self, context, image_href):
        """Throws an ImageNotFound exception if image_href does not exist."""
        (image_service, image_id) = nova.image.get_image_service(context,
                                                                 image_href)
        image_service.show(context, image_id)

    @wrap_check_policy
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF],
                          task_state=[None, task_states.RESIZE_VERIFY])
    def rebuild(self, context, instance, image_href, admin_password,
                **kwargs):
        """Rebuild the given instance with the provided attributes."""
        self._validate_image_href(context, image_href)

        files_to_inject = kwargs.pop('files_to_inject', [])
        self._check_injected_file_quota(context, files_to_inject)

        metadata = kwargs.get('metadata', {})
        self._check_metadata_properties_quota(context, metadata)

        self.update(context,
                    instance,
                    image_ref=image_href,
                    vm_state=vm_states.REBUILDING,
                    task_state=None,
                    progress=0,
                    **kwargs)

        rebuild_params = {
            "new_pass": admin_password,
            "injected_files": files_to_inject,
        }
self._cast_compute_message('rebuild_instance', context, instance, params=rebuild_params) @wrap_check_policy @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF], task_state=[task_states.RESIZE_VERIFY]) def revert_resize(self, context, instance): """Reverts a resize, deleting the 'new' instance in the process.""" context = context.elevated() migration_ref = self.db.migration_get_by_instance_and_status(context, instance['uuid'], 'finished') if not migration_ref: raise exception.MigrationNotFoundByStatus( instance_id=instance['uuid'], status='finished') self.update(context, instance, vm_state=vm_states.RESIZING, task_state=task_states.RESIZE_REVERTING) params = {'migration_id': migration_ref['id']} self._cast_compute_message('revert_resize', context, instance, host=migration_ref['dest_compute'], params=params) self.db.migration_update(context, migration_ref['id'], {'status': 'reverted'}) @wrap_check_policy @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF], task_state=[task_states.RESIZE_VERIFY]) def confirm_resize(self, context, instance): """Confirms a migration/resize and deletes the 'old' instance.""" context = context.elevated() migration_ref = self.db.migration_get_by_instance_and_status(context, instance['uuid'], 'finished') if not migration_ref: raise exception.MigrationNotFoundByStatus( instance_id=instance['uuid'], status='finished') self.update(context, instance, vm_state=vm_states.ACTIVE, task_state=None) params = {'migration_id': migration_ref['id']} self._cast_compute_message('confirm_resize', context, instance, host=migration_ref['source_compute'], params=params) self.db.migration_update(context, migration_ref['id'], {'status': 'confirmed'}) self.db.instance_update(context, instance['uuid'], {'host': migration_ref['dest_compute'], }) @wrap_check_policy @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF], task_state=[None]) def resize(self, context, instance, flavor_id=None, **kwargs): """Resize (ie, migrate) a running instance. If flavor_id is None, the process is considered a migration, keeping the original flavor_id. If flavor_id is not None, the instance should be migrated to a new host and resized to the new flavor_id. """ current_instance_type = instance['instance_type'] # If flavor_id is not provided, only migrate the instance. if not flavor_id: LOG.debug(_("flavor_id is None. 
                        Assuming migration."))
            new_instance_type = current_instance_type
        else:
            new_instance_type = instance_types.get_instance_type_by_flavor_id(
                    flavor_id)
            # Check for a missing flavor *before* dereferencing it below.
            if not new_instance_type:
                raise exception.FlavorNotFound(flavor_id=flavor_id)

        current_instance_type_name = current_instance_type['name']
        new_instance_type_name = new_instance_type['name']
        LOG.debug(_("Old instance type %(current_instance_type_name)s, "
                    " new instance type %(new_instance_type_name)s")
                  % locals())

        # NOTE(markwash): look up the image early to avoid auth problems later
        image = self.image_service.show(context, instance['image_ref'])

        current_memory_mb = current_instance_type['memory_mb']
        new_memory_mb = new_instance_type['memory_mb']
        if (current_memory_mb == new_memory_mb) and flavor_id:
            raise exception.CannotResizeToSameSize()

        self.update(context,
                    instance,
                    vm_state=vm_states.RESIZING,
                    task_state=task_states.RESIZE_PREP,
                    progress=0,
                    **kwargs)

        request_spec = {
                'instance_type': new_instance_type,
                'num_instances': 1,
                'instance_properties': instance}

        filter_properties = {'ignore_hosts': []}

        if not FLAGS.allow_resize_to_same_host:
            filter_properties['ignore_hosts'].append(instance['host'])

        args = {
            "topic": FLAGS.compute_topic,
            "instance_uuid": instance['uuid'],
            "instance_type_id": new_instance_type['id'],
            "image": image,
            "update_db": False,
            "request_spec": utils.to_primitive(request_spec),
            "filter_properties": filter_properties,
        }
        self._cast_scheduler_message(context,
                                     {"method": "prep_resize",
                                      "args": args})
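    # Illustrative sketch (not part of the API): resize() doubles as the
    # migration entry point. Assuming 'api', 'ctxt' and 'instance' as before:
    #
    #     api.resize(ctxt, instance)                 # same flavor: migrate only
    #     api.resize(ctxt, instance, flavor_id='42') # move to flavor 42
    #
    # After the compute node finishes, the instance sits in RESIZE_VERIFY
    # until the caller invokes confirm_resize() or revert_resize() above.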
    @wrap_check_policy
    def add_fixed_ip(self, context, instance, network_id):
        """Add a fixed IP from the specified network to the given instance."""
        self._cast_compute_message('add_fixed_ip_to_instance', context,
                                   instance,
                                   params=dict(network_id=network_id))

    @wrap_check_policy
    def remove_fixed_ip(self, context, instance, address):
        """Remove the given fixed IP address from the given instance."""
        self._cast_compute_message('remove_fixed_ip_from_instance', context,
                                   instance, params=dict(address=address))

    @wrap_check_policy
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF,
                                    vm_states.RESCUED],
                          task_state=[None, task_states.RESIZE_VERIFY])
    def pause(self, context, instance):
        """Pause the given instance."""
        self.update(context,
                    instance,
                    vm_state=vm_states.ACTIVE,
                    task_state=task_states.PAUSING)
        self._cast_compute_message('pause_instance', context, instance)

    @wrap_check_policy
    @check_instance_state(vm_state=[vm_states.PAUSED])
    def unpause(self, context, instance):
        """Unpause the given instance."""
        self.update(context,
                    instance,
                    vm_state=vm_states.PAUSED,
                    task_state=task_states.UNPAUSING)
        self._cast_compute_message('unpause_instance', context, instance)

    @wrap_check_policy
    def get_diagnostics(self, context, instance):
        """Retrieve diagnostics for the given instance."""
        return self._call_compute_message("get_diagnostics", context,
                                          instance)

    @wrap_check_policy
    def get_actions(self, context, instance):
        """Retrieve actions for the given instance."""
        return self.db.instance_get_actions(context, instance['uuid'])

    @wrap_check_policy
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF,
                                    vm_states.RESCUED],
                          task_state=[None, task_states.RESIZE_VERIFY])
    def suspend(self, context, instance):
        """Suspend the given instance."""
        self.update(context,
                    instance,
                    vm_state=vm_states.ACTIVE,
                    task_state=task_states.SUSPENDING)
        self._cast_compute_message('suspend_instance', context, instance)

    @wrap_check_policy
    @check_instance_state(vm_state=[vm_states.SUSPENDED])
    def resume(self, context, instance):
        """Resume the given instance."""
        self.update(context,
                    instance,
                    vm_state=vm_states.SUSPENDED,
                    task_state=task_states.RESUMING)
        self._cast_compute_message('resume_instance', context, instance)

    @wrap_check_policy
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF,
                                    vm_states.STOPPED],
                          task_state=[None, task_states.RESIZE_VERIFY])
    def rescue(self, context, instance, rescue_password=None):
        """Rescue the given instance."""
        self.update(context,
                    instance,
                    vm_state=vm_states.ACTIVE,
                    task_state=task_states.RESCUING)

        rescue_params = {
            "rescue_password": rescue_password
        }
        self._cast_compute_message('rescue_instance', context, instance,
                                   params=rescue_params)

    @wrap_check_policy
    @check_instance_state(vm_state=[vm_states.RESCUED])
    def unrescue(self, context, instance):
        """Unrescue the given instance."""
        self.update(context,
                    instance,
                    vm_state=vm_states.RESCUED,
                    task_state=task_states.UNRESCUING)
        self._cast_compute_message('unrescue_instance', context, instance)

    @wrap_check_policy
    @check_instance_state(vm_state=[vm_states.ACTIVE])
    def set_admin_password(self, context, instance, password=None):
        """Set the root/admin password for the given instance."""
        self.update(context,
                    instance,
                    task_state=task_states.UPDATING_PASSWORD)

        params = {"new_pass": password}
        self._cast_compute_message('set_admin_password', context, instance,
                                   params=params)

    @wrap_check_policy
    def inject_file(self, context, instance, path, file_contents):
        """Write a file to the given instance."""
        params = {'path': path, 'file_contents': file_contents}
        self._cast_compute_message('inject_file', context, instance,
                                   params=params)

    @wrap_check_policy
    def get_vnc_console(self, context, instance, console_type):
        """Get a url to an instance Console."""
        connect_info = self._call_compute_message('get_vnc_console',
                                                  context,
                                                  instance,
                                                  params={"console_type":
                                                          console_type})

        rpc.call(context, '%s' % FLAGS.consoleauth_topic,
                 {'method': 'authorize_console',
                  'args': {'token': connect_info['token'],
                           'console_type': console_type,
                           'host': connect_info['host'],
                           'port': connect_info['port'],
                           'internal_access_path':
                                   connect_info['internal_access_path']}})

        return {'url': connect_info['access_url']}

    @wrap_check_policy
    def get_console_output(self, context, instance, tail_length=None):
        """Get console output for an instance."""
        params = {'tail_length': tail_length}
        return self._call_compute_message('get_console_output', context,
                                          instance, params=params)

    @wrap_check_policy
    def lock(self, context, instance):
        """Lock the given instance."""
        self._cast_compute_message('lock_instance', context, instance)

    @wrap_check_policy
    def unlock(self, context, instance):
        """Unlock the given instance."""
        self._cast_compute_message('unlock_instance', context, instance)

    @wrap_check_policy
    def get_lock(self, context, instance):
        """Return the boolean state of the given instance's lock."""
        return self.get(context, instance['uuid'])['locked']

    @wrap_check_policy
    def reset_network(self, context, instance):
        """Reset networking on the instance."""
        self._cast_compute_message('reset_network', context, instance)

    @wrap_check_policy
    def inject_network_info(self, context, instance):
        """Inject network info for the instance."""
        self._cast_compute_message('inject_network_info', context, instance)

    @wrap_check_policy
    def attach_volume(self, context, instance, volume_id, device):
        """Attach an existing volume to an existing instance."""
        if not re.match("^/dev/x{0,1}[a-z]d[a-z]+$", device):
            raise exception.InvalidDevicePath(path=device)
        volume = self.volume_api.get(context, volume_id)
        self.volume_api.check_attach(context, volume)
        self.volume_api.reserve_volume(context, volume)
        params = {"volume_id": volume_id,
                  "mountpoint": device}
        self._cast_compute_message('attach_volume', context, instance,
                                   params=params)

    # FIXME(comstud): I wonder if API should pull in the instance from
    # the volume ID via volume API and pass it and the volume object here
    def detach_volume(self, context, volume_id):
        """Detach a volume from an instance."""
        instance = self.db.volume_get_instance(context.elevated(), volume_id)
        if not instance:
            raise exception.VolumeUnattached(volume_id=volume_id)

        check_policy(context, 'detach_volume', instance)

        volume = self.volume_api.get(context, volume_id)
        self.volume_api.check_detach(context, volume)

        params = {'volume_id': volume_id}
        self._cast_compute_message('detach_volume', context, instance,
                                   params=params)
        return instance

    @wrap_check_policy
    def associate_floating_ip(self, context, instance, address):
        """Makes calls to network_api to associate_floating_ip.

        :param address: is a string floating ip address
        """
        instance_uuid = instance['uuid']

        # TODO(tr3buchet): currently network_info doesn't contain floating IPs
        # in its info, if this changes, the next few lines will need to
        # accommodate the info containing floating as well as fixed ip
        # addresses
        nw_info = self.network_api.get_instance_nw_info(context.elevated(),
                                                        instance)

        if not nw_info:
            raise exception.FixedIpNotFoundForInstance(
                    instance_id=instance_uuid)

        ips = [ip for ip in nw_info[0].fixed_ips()]

        if not ips:
            raise exception.FixedIpNotFoundForInstance(
                    instance_id=instance_uuid)

        # TODO(tr3buchet): this will associate the floating IP with the
        # first fixed_ip (lowest id) an instance has. This should be
        # changed to support specifying a particular fixed_ip if
        # multiple exist.
        if len(ips) > 1:
            msg = _('multiple fixed IPs exist, using the first: %s')
            LOG.warning(msg, ips[0]['address'])

        self.network_api.associate_floating_ip(context,
                floating_address=address, fixed_address=ips[0]['address'])
        self.network_api.invalidate_instance_cache(context.elevated(),
                                                   instance)

    @wrap_check_policy
    def get_instance_metadata(self, context, instance):
        """Get all metadata associated with an instance."""
        rv = self.db.instance_metadata_get(context, instance['id'])
        return dict(rv.iteritems())

    @wrap_check_policy
    def delete_instance_metadata(self, context, instance, key):
        """Delete the given metadata item from an instance."""
        self.db.instance_metadata_delete(context, instance['id'], key)

    @wrap_check_policy
    def update_instance_metadata(self, context, instance,
                                 metadata, delete=False):
        """Updates or creates instance metadata.

        If delete is True, metadata items that are not specified in the
        `metadata` argument will be deleted.
        """
        if delete:
            _metadata = metadata
        else:
            _metadata = self.get_instance_metadata(context, instance)
            _metadata.update(metadata)

        self._check_metadata_properties_quota(context, _metadata)
        self.db.instance_metadata_update(context, instance['id'],
                                         _metadata, True)
        return _metadata
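    # Illustrative sketch (not part of the API): the 'delete' flag above
    # switches between merge and replace semantics. Assuming an instance
    # whose current metadata is {'a': '1', 'b': '2'}:
    #
    #     api.update_instance_metadata(ctxt, instance, {'b': '3'})
    #     # -> {'a': '1', 'b': '3'}    (merge: 'a' is kept)
    #
    #     api.update_instance_metadata(ctxt, instance, {'b': '3'}, delete=True)
    #     # -> {'b': '3'}              (replace: 'a' is deleted)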
""" if delete: _metadata = metadata else: _metadata = self.get_instance_metadata(context, instance) _metadata.update(metadata) self._check_metadata_properties_quota(context, _metadata) self.db.instance_metadata_update(context, instance['id'], _metadata, True) return _metadata def get_instance_faults(self, context, instances): """Get all faults for a list of instance uuids.""" if not instances: return {} for instance in instances: check_policy(context, 'get_instance_faults', instance) uuids = [instance['uuid'] for instance in instances] return self.db.instance_fault_get_by_instance_uuids(context, uuids) class HostAPI(BaseAPI): """Sub-set of the Compute Manager API for managing host operations.""" def set_host_enabled(self, context, host, enabled): """Sets the specified host's ability to accept new instances.""" # NOTE(comstud): No instance_uuid argument to this compute manager # call return self._call_compute_message("set_host_enabled", context, host=host, params={"enabled": enabled}) def host_power_action(self, context, host, action): """Reboots, shuts down or powers up the host.""" # NOTE(comstud): No instance_uuid argument to this compute manager # call return self._call_compute_message("host_power_action", context, host=host, params={"action": action}) def set_host_maintenance(self, context, host, mode): """Start/Stop host maintenance window. On start, it triggers guest VMs evacuation.""" return self._call_compute_message("host_maintenance_mode", context, host=host, params={"host": host, "mode": mode}) class AggregateAPI(base.Base): """Sub-set of the Compute Manager API for managing host aggregates.""" def __init__(self, **kwargs): super(AggregateAPI, self).__init__(**kwargs) def create_aggregate(self, context, aggregate_name, availability_zone): """Creates the model for the aggregate.""" zones = [s.availability_zone for s in self.db.service_get_all_by_topic(context, FLAGS.compute_topic)] if availability_zone in zones: values = {"name": aggregate_name, "availability_zone": availability_zone} aggregate = self.db.aggregate_create(context, values) return dict(aggregate.iteritems()) else: raise exception.InvalidAggregateAction(action='create_aggregate', aggregate_id="'N/A'", reason='invalid zone') def get_aggregate(self, context, aggregate_id): """Get an aggregate by id.""" aggregate = self.db.aggregate_get(context, aggregate_id) return self._get_aggregate_info(context, aggregate) def get_aggregate_list(self, context): """Get all the aggregates for this zone.""" aggregates = self.db.aggregate_get_all(context, read_deleted="no") return [self._get_aggregate_info(context, a) for a in aggregates] def update_aggregate(self, context, aggregate_id, values): """Update the properties of an aggregate.""" aggregate = self.db.aggregate_update(context, aggregate_id, values) return self._get_aggregate_info(context, aggregate) def update_aggregate_metadata(self, context, aggregate_id, metadata): """Updates the aggregate metadata. If a key is set to None, it gets removed from the aggregate metadata. """ # As a first release of the host aggregates blueprint, this call is # pretty dumb, in the sense that interacts only with the model. # In later releasses, updating metadata may trigger virt actions like # the setup of shared storage, or more generally changes to the # underlying hypervisor pools. 
        for key in metadata.keys():
            if not metadata[key]:
                try:
                    self.db.aggregate_metadata_delete(context,
                                                      aggregate_id, key)
                    metadata.pop(key)
                except exception.AggregateMetadataNotFound, e:
                    LOG.warn(e.message)
        self.db.aggregate_metadata_add(context, aggregate_id, metadata)
        return self.get_aggregate(context, aggregate_id)

    def delete_aggregate(self, context, aggregate_id):
        """Deletes the aggregate."""
        hosts = self.db.aggregate_host_get_all(context, aggregate_id,
                                               read_deleted="no")
        if len(hosts) > 0:
            raise exception.InvalidAggregateAction(action='delete',
                                                   aggregate_id=aggregate_id,
                                                   reason='not empty')
        self.db.aggregate_delete(context, aggregate_id)

    def add_host_to_aggregate(self, context, aggregate_id, host):
        """Adds the host to an aggregate."""
        # validates the host; ComputeHostNotFound is raised if invalid
        service = self.db.service_get_all_compute_by_host(context, host)[0]
        # add host, and reflects action in the aggregate operational state
        aggregate = self.db.aggregate_get(context, aggregate_id)
        if aggregate.operational_state in [aggregate_states.CREATED,
                                           aggregate_states.ACTIVE]:
            if service.availability_zone != aggregate.availability_zone:
                raise exception.InvalidAggregateAction(
                        action='add host',
                        aggregate_id=aggregate_id,
                        reason='availability zone mismatch')
            self.db.aggregate_host_add(context, aggregate_id, host)
            if aggregate.operational_state == aggregate_states.CREATED:
                values = {'operational_state': aggregate_states.CHANGING}
                self.db.aggregate_update(context, aggregate_id, values)
            queue = self.db.queue_get_for(context, service.topic, host)
            rpc.cast(context, queue, {"method": "add_aggregate_host",
                                      "args": {"aggregate_id": aggregate_id,
                                               "host": host}, })
            return self.get_aggregate(context, aggregate_id)
        else:
            invalid = {aggregate_states.CHANGING: 'setup in progress',
                       aggregate_states.DISMISSED: 'aggregate deleted',
                       aggregate_states.ERROR: 'aggregate in error', }
            if aggregate.operational_state in invalid.keys():
                raise exception.InvalidAggregateAction(
                        action='add host',
                        aggregate_id=aggregate_id,
                        reason=invalid[aggregate.operational_state])

    def remove_host_from_aggregate(self, context, aggregate_id, host):
        """Removes host from the aggregate."""
        # validates the host; ComputeHostNotFound is raised if invalid
        service = self.db.service_get_all_compute_by_host(context, host)[0]
        aggregate = self.db.aggregate_get(context, aggregate_id)
        if aggregate.operational_state in [aggregate_states.ACTIVE,
                                           aggregate_states.ERROR]:
            self.db.aggregate_host_delete(context, aggregate_id, host)
            queue = self.db.queue_get_for(context, service.topic, host)
            rpc.cast(context, queue, {"method": "remove_aggregate_host",
                                      "args": {"aggregate_id": aggregate_id,
                                               "host": host}, })
            return self.get_aggregate(context, aggregate_id)
        else:
            invalid = {aggregate_states.CREATED: 'no hosts to remove',
                       aggregate_states.CHANGING: 'setup in progress',
                       aggregate_states.DISMISSED: 'aggregate deleted', }
            if aggregate.operational_state in invalid.keys():
                raise exception.InvalidAggregateAction(
                        action='remove host',
                        aggregate_id=aggregate_id,
                        reason=invalid[aggregate.operational_state])

    def _get_aggregate_info(self, context, aggregate):
        """Builds a dictionary with aggregate props, metadata and hosts."""
        metadata = self.db.aggregate_metadata_get(context, aggregate.id)
        hosts = self.db.aggregate_host_get_all(context, aggregate.id,
                                               read_deleted="no")
        result = dict(aggregate.iteritems())
        result["metadata"] = metadata
        result["hosts"] = hosts
        return result
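
# A minimal standalone sketch (not part of nova) of the device path check
# used by API.attach_volume() above; the helper name is illustrative only.
import re

_DEVICE_RE = re.compile("^/dev/x{0,1}[a-z]d[a-z]+$")


def _is_valid_device_path(device):
    # Accepts e.g. /dev/vdb or /dev/xvdc; the anchored pattern rejects
    # traversal-style paths outside /dev.
    return _DEVICE_RE.match(device) is not None

assert _is_valid_device_path('/dev/vdb')
assert _is_valid_device_path('/dev/xvdc')
assert not _is_valid_device_path('/dev/../etc/passwd')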
#!/usr/bin/env python2 __license__ = 'GPL v3' __copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net' __docformat__ = 'restructuredtext en' # Imports {{{ import os, math, json from base64 import b64encode from functools import partial from future_builtins import map from PyQt5.Qt import ( QSize, QSizePolicy, QUrl, Qt, pyqtProperty, QPainter, QPalette, QBrush, QDialog, QColor, QPoint, QImage, QRegion, QIcon, QAction, QMenu, pyqtSignal, QApplication, pyqtSlot, QKeySequence, QMimeData) from PyQt5.QtWebKitWidgets import QWebPage, QWebView from PyQt5.QtWebKit import QWebSettings, QWebElement from calibre.gui2.viewer.flip import SlideFlip from calibre.gui2.shortcuts import Shortcuts from calibre.gui2 import open_url from calibre import prints from calibre.customize.ui import all_viewer_plugins from calibre.gui2.viewer.keys import SHORTCUTS from calibre.gui2.viewer.javascript import JavaScriptLoader from calibre.gui2.viewer.position import PagePosition from calibre.gui2.viewer.config import config, ConfigDialog, load_themes from calibre.gui2.viewer.image_popup import ImagePopup from calibre.gui2.viewer.table_popup import TablePopup from calibre.gui2.viewer.inspector import WebInspector from calibre.gui2.viewer.gestures import GestureHandler from calibre.gui2.viewer.footnote import Footnotes from calibre.ebooks.oeb.display.webview import load_html from calibre.constants import isxp, iswindows, DEBUG, __version__ # }}} def apply_settings(settings, opts): settings.setFontSize(QWebSettings.DefaultFontSize, opts.default_font_size) settings.setFontSize(QWebSettings.DefaultFixedFontSize, opts.mono_font_size) settings.setFontSize(QWebSettings.MinimumLogicalFontSize, opts.minimum_font_size) settings.setFontSize(QWebSettings.MinimumFontSize, opts.minimum_font_size) settings.setFontFamily(QWebSettings.StandardFont, {'serif':opts.serif_family, 'sans':opts.sans_family, 'mono':opts.mono_family}[opts.standard_font]) settings.setFontFamily(QWebSettings.SerifFont, opts.serif_family) settings.setFontFamily(QWebSettings.SansSerifFont, opts.sans_family) settings.setFontFamily(QWebSettings.FixedFont, opts.mono_family) settings.setAttribute(QWebSettings.ZoomTextOnly, True) def apply_basic_settings(settings): # Security settings.setAttribute(QWebSettings.JavaEnabled, False) settings.setAttribute(QWebSettings.PluginsEnabled, False) settings.setAttribute(QWebSettings.JavascriptCanOpenWindows, False) settings.setAttribute(QWebSettings.JavascriptCanAccessClipboard, False) settings.setAttribute(QWebSettings.LocalContentCanAccessFileUrls, False) # ensure javascript cannot read from local files # PrivateBrowsing disables console messages # settings.setAttribute(QWebSettings.PrivateBrowsingEnabled, True) settings.setAttribute(QWebSettings.NotificationsEnabled, False) settings.setThirdPartyCookiePolicy(QWebSettings.AlwaysBlockThirdPartyCookies) # Miscellaneous settings.setAttribute(QWebSettings.LinksIncludedInFocusChain, True) settings.setAttribute(QWebSettings.DeveloperExtrasEnabled, True) class Document(QWebPage): # {{{ page_turn = pyqtSignal(object) mark_element = pyqtSignal(QWebElement) settings_changed = pyqtSignal() animated_scroll_done_signal = pyqtSignal() def set_font_settings(self, opts): settings = self.settings() apply_settings(settings, opts) def do_config(self, parent=None): d = ConfigDialog(self.shortcuts, parent) if d.exec_() == QDialog.Accepted: opts = config().parse() self.apply_settings(opts) def apply_settings(self, opts): with self.page_position: self.set_font_settings(opts) self.set_user_stylesheet(opts) 
self.misc_config(opts) self.settings_changed.emit() self.after_load() def __init__(self, shortcuts, parent=None, debug_javascript=False): QWebPage.__init__(self, parent) self.setObjectName("py_bridge") self.in_paged_mode = False # Use this to pass arbitrary JSON encodable objects between python and # javascript. In python get/set the value as: self.bridge_value. In # javascript, get/set the value as: py_bridge.value self.bridge_value = None self.first_load = True self.jump_to_cfi_listeners = set() self.debug_javascript = debug_javascript self.anchor_positions = {} self.index_anchors = set() self.current_language = None self.loaded_javascript = False self.js_loader = JavaScriptLoader( dynamic_coffeescript=self.debug_javascript) self.in_fullscreen_mode = False self.math_present = False self.setLinkDelegationPolicy(self.DelegateAllLinks) self.scroll_marks = [] self.shortcuts = shortcuts pal = self.palette() pal.setBrush(QPalette.Background, QColor(0xee, 0xee, 0xee)) self.setPalette(pal) self.page_position = PagePosition(self) settings = self.settings() # Fonts self.all_viewer_plugins = tuple(all_viewer_plugins()) for pl in self.all_viewer_plugins: pl.load_fonts() opts = config().parse() self.set_font_settings(opts) apply_basic_settings(settings) self.set_user_stylesheet(opts) self.misc_config(opts) # Load javascript self.mainFrame().javaScriptWindowObjectCleared.connect( self.add_window_objects) self.turn_off_internal_scrollbars() def turn_off_internal_scrollbars(self): mf = self.mainFrame() mf.setScrollBarPolicy(Qt.Vertical, Qt.ScrollBarAlwaysOff) mf.setScrollBarPolicy(Qt.Horizontal, Qt.ScrollBarAlwaysOff) def set_user_stylesheet(self, opts): brules = ['background-color: %s !important'%opts.background_color] if opts.background_color else ['background-color: white'] prefix = ''' body { %s } '''%('; '.join(brules)) if opts.text_color: prefix += '\n\nbody, p, div { color: %s !important }'%opts.text_color raw = prefix + opts.user_css raw = '::selection {background:#ffff00; color:#000;}\n'+raw data = 'data:text/css;charset=utf-8;base64,' data += b64encode(raw.encode('utf-8')) self.settings().setUserStyleSheetUrl(QUrl(data)) def findText(self, q, flags): if self.hyphenatable: q = unicode(q) hyphenated_q = self.javascript( 'hyphenate_text(%s, "%s")' % (json.dumps(q, ensure_ascii=False), self.loaded_lang), typ='string') if hyphenated_q and QWebPage.findText(self, hyphenated_q, flags): return True return QWebPage.findText(self, q, flags) def misc_config(self, opts): self.hyphenate = opts.hyphenate self.hyphenate_default_lang = opts.hyphenate_default_lang self.do_fit_images = opts.fit_images self.page_flip_duration = opts.page_flip_duration self.enable_page_flip = self.page_flip_duration > 0.1 self.font_magnification_step = opts.font_magnification_step self.wheel_flips_pages = opts.wheel_flips_pages self.wheel_scroll_fraction = opts.wheel_scroll_fraction self.line_scroll_fraction = opts.line_scroll_fraction self.tap_flips_pages = opts.tap_flips_pages self.line_scrolling_stops_on_pagebreaks = opts.line_scrolling_stops_on_pagebreaks screen_width = QApplication.desktop().screenGeometry().width() # Leave some space for the scrollbar and some border self.max_fs_width = min(opts.max_fs_width, screen_width-50) self.max_fs_height = opts.max_fs_height self.fullscreen_clock = opts.fullscreen_clock self.fullscreen_scrollbar = opts.fullscreen_scrollbar self.fullscreen_pos = opts.fullscreen_pos self.start_in_fullscreen = opts.start_in_fullscreen self.show_fullscreen_help = opts.show_fullscreen_help 
self.use_book_margins = opts.use_book_margins self.cols_per_screen_portrait = opts.cols_per_screen_portrait self.cols_per_screen_landscape = opts.cols_per_screen_landscape self.side_margin = opts.side_margin self.top_margin, self.bottom_margin = opts.top_margin, opts.bottom_margin self.show_controls = opts.show_controls self.remember_current_page = opts.remember_current_page self.copy_bookmarks_to_file = opts.copy_bookmarks_to_file self.search_online_url = opts.search_online_url or 'https://www.google.com/search?q={text}' def fit_images(self): if self.do_fit_images and not self.in_paged_mode: self.javascript('setup_image_scaling_handlers()') def add_window_objects(self): self.mainFrame().addToJavaScriptWindowObject("py_bridge", self) self.javascript(''' Object.defineProperty(py_bridge, 'value', { get : function() { return JSON.parse(this._pass_json_value); }, set : function(val) { this._pass_json_value = JSON.stringify(val); } }); ''') self.loaded_javascript = False def load_javascript_libraries(self): if self.loaded_javascript: return self.loaded_javascript = True evaljs = self.mainFrame().evaluateJavaScript self.loaded_lang = self.js_loader(evaljs, self.current_language, self.hyphenate_default_lang) evaljs('window.calibre_utils.setup_epub_reading_system(%s, %s, %s, %s)' % tuple(map(json.dumps, ( 'calibre-desktop', __version__, 'paginated' if self.in_paged_mode else 'scrolling', 'dom-manipulation layout-changes mouse-events keyboard-events'.split())))) mjpath = P(u'viewer/mathjax').replace(os.sep, '/') if iswindows: mjpath = u'/' + mjpath self.javascript(u'window.mathjax.base = %s'%(json.dumps(mjpath, ensure_ascii=False))) for pl in self.all_viewer_plugins: pl.load_javascript(evaljs) evaljs('py_bridge.mark_element.connect(window.calibre_extract.mark)') @pyqtSlot() def animated_scroll_done(self): self.animated_scroll_done_signal.emit() @property def hyphenatable(self): # Qt fails to render soft hyphens correctly on windows xp return not isxp and self.hyphenate and getattr(self, 'loaded_lang', '') and not self.math_present @pyqtSlot() def init_hyphenate(self): if self.hyphenatable: self.javascript('do_hyphenation("%s")'%self.loaded_lang) @pyqtSlot(int) def page_turn_requested(self, backwards): self.page_turn.emit(bool(backwards)) def _pass_json_value_getter(self): val = json.dumps(self.bridge_value) return val def _pass_json_value_setter(self, value): self.bridge_value = json.loads(unicode(value)) _pass_json_value = pyqtProperty(str, fget=_pass_json_value_getter, fset=_pass_json_value_setter) def after_load(self, last_loaded_path=None): self.javascript('window.paged_display.read_document_margins()') self.set_bottom_padding(0) self.fit_images() w = 1 if iswindows else 0 self.math_present = self.javascript('window.mathjax.check_for_math(%d)' % w, bool) self.init_hyphenate() self.javascript('full_screen.save_margins()') if self.in_fullscreen_mode: self.switch_to_fullscreen_mode() if self.in_paged_mode: self.switch_to_paged_mode(last_loaded_path=last_loaded_path) self.read_anchor_positions(use_cache=False) evaljs = self.mainFrame().evaluateJavaScript for pl in self.all_viewer_plugins: pl.run_javascript(evaljs) self.first_load = False def colors(self): self.javascript(''' bs = getComputedStyle(document.body); py_bridge.value = [bs.backgroundColor, bs.color] ''') ans = self.bridge_value return (ans if isinstance(ans, list) else ['white', 'black']) def read_anchor_positions(self, use_cache=True): self.bridge_value = tuple(self.index_anchors) self.javascript(u''' py_bridge.value = 
book_indexing.anchor_positions(py_bridge.value, %s); '''%('true' if use_cache else 'false')) self.anchor_positions = self.bridge_value if not isinstance(self.anchor_positions, dict): # Some weird javascript error happened self.anchor_positions = {} return {k:tuple(v) for k, v in self.anchor_positions.iteritems()} def switch_to_paged_mode(self, onresize=False, last_loaded_path=None): if onresize and not self.loaded_javascript: return cols_per_screen = self.cols_per_screen_portrait if self.is_portrait else self.cols_per_screen_landscape cols_per_screen = max(1, min(5, cols_per_screen)) self.javascript(''' window.paged_display.use_document_margins = %s; window.paged_display.set_geometry(%d, %d, %d, %d); '''%( ('true' if self.use_book_margins else 'false'), cols_per_screen, self.top_margin, self.side_margin, self.bottom_margin )) force_fullscreen_layout = bool(getattr(last_loaded_path, 'is_single_page', False)) self.update_contents_size_for_paged_mode(force_fullscreen_layout) def update_contents_size_for_paged_mode(self, force_fullscreen_layout=None): # Setup the contents size to ensure that there is a right most margin. # Without this WebKit renders the final column with no margin, as the # columns extend beyond the boundaries (and margin) of body if force_fullscreen_layout is None: force_fullscreen_layout = self.javascript('window.paged_display.is_full_screen_layout', typ=bool) f = 'true' if force_fullscreen_layout else 'false' side_margin = self.javascript('window.paged_display.layout(%s)'%f, typ=int) mf = self.mainFrame() sz = mf.contentsSize() scroll_width = self.javascript('document.body.scrollWidth', int) # At this point sz.width() is not reliable, presumably because Qt # has not yet been updated if scroll_width > self.window_width: sz.setWidth(scroll_width+side_margin) self.setPreferredContentsSize(sz) self.javascript('window.paged_display.fit_images()') @property def column_boundaries(self): if not self.loaded_javascript: return (0, 1) self.javascript(u'py_bridge.value = paged_display.column_boundaries()') return tuple(self.bridge_value) def after_resize(self): if self.in_paged_mode: self.setPreferredContentsSize(QSize()) self.switch_to_paged_mode(onresize=True) self.javascript('if (window.mathjax) window.mathjax.after_resize();') def switch_to_fullscreen_mode(self): self.in_fullscreen_mode = True self.javascript('full_screen.on(%d, %d, %s)'%(self.max_fs_width, self.max_fs_height, 'true' if self.in_paged_mode else 'false')) def switch_to_window_mode(self): self.in_fullscreen_mode = False self.javascript('full_screen.off(%s)'%('true' if self.in_paged_mode else 'false')) @pyqtSlot(str) def debug(self, msg): prints(unicode(msg)) @pyqtSlot(int) def jump_to_cfi_finished(self, job_id): for l in self.jump_to_cfi_listeners: l(job_id) def reference_mode(self, enable): self.javascript(('enter' if enable else 'leave')+'_reference_mode()') def set_reference_prefix(self, prefix): self.javascript('reference_prefix = "%s"'%prefix) def goto(self, ref): self.javascript('goto_reference("%s")'%ref) def goto_bookmark(self, bm): if bm['type'] == 'legacy': bm = bm['pos'] bm = bm.strip() if bm.startswith('>'): bm = bm[1:].strip() self.javascript('scroll_to_bookmark("%s")'%bm) elif bm['type'] == 'cfi': self.page_position.to_pos(bm['pos']) def javascript(self, string, typ=None): ans = self.mainFrame().evaluateJavaScript(string) if typ in {'int', int}: try: return int(ans) except (TypeError, ValueError): return 0 if typ in {'float', float}: try: return float(ans) except (TypeError, ValueError): return 0.0 if 
typ == 'string':
            return ans or u''
        if typ in {bool, 'bool'}:
            return bool(ans)
        return ans

    def javaScriptConsoleMessage(self, msg, lineno, msgid):
        if DEBUG or self.debug_javascript:
            prints(msg)

    def javaScriptAlert(self, frame, msg):
        if DEBUG:
            prints(msg)
        else:
            return QWebPage.javaScriptAlert(self, frame, msg)

    def scroll_by(self, dx=0, dy=0):
        self.mainFrame().scroll(dx, dy)

    def scroll_to(self, x=0, y=0):
        self.mainFrame().setScrollPosition(QPoint(x, y))

    def jump_to_anchor(self, anchor):
        if not self.loaded_javascript:
            return
        self.javascript('window.paged_display.jump_to_anchor("%s")'%anchor)

    def element_ypos(self, elem):
        try:
            ans = int(elem.evaluateJavaScript('$(this).offset().top'))
        except (TypeError, ValueError):
            raise ValueError('No ypos found')
        return ans

    def elem_outer_xml(self, elem):
        return unicode(elem.toOuterXml())

    def bookmark(self):
        pos = self.page_position.current_pos
        return {'type':'cfi', 'pos':pos}

    @property
    def at_bottom(self):
        return self.height - self.ypos <= self.window_height

    @property
    def at_top(self):
        return self.ypos <= 0

    def test(self):
        pass

    @property
    def ypos(self):
        return self.mainFrame().scrollPosition().y()

    @property
    def window_height(self):
        return self.javascript('window.innerHeight', 'int')

    @property
    def window_width(self):
        return self.javascript('window.innerWidth', 'int')

    @property
    def is_portrait(self):
        return self.window_width < self.window_height

    @property
    def xpos(self):
        return self.mainFrame().scrollPosition().x()

    @dynamic_property
    def scroll_fraction(self):
        def fget(self):
            if self.in_paged_mode:
                return self.javascript('''
                    ans = 0.0;
                    if (window.paged_display) {
                        ans = window.paged_display.current_pos();
                    }
                    ans;''', typ='float')
            else:
                try:
                    return abs(float(self.ypos)/(self.height-self.window_height))
                except ZeroDivisionError:
                    return 0.
        def fset(self, val):
            if self.in_paged_mode and self.loaded_javascript:
                self.javascript('paged_display.scroll_to_pos(%f)'%val)
            else:
                npos = val * (self.height - self.window_height)
                if npos < 0:
                    npos = 0
                self.scroll_to(x=self.xpos, y=npos)
        return property(fget=fget, fset=fset)

    @dynamic_property
    def page_number(self):
        ' The page number is the number of the page at the left most edge of the screen (starting from 0) '
        def fget(self):
            if self.in_paged_mode:
                return self.javascript(
                    'ans = 0; if (window.paged_display) ans = window.paged_display.column_boundaries()[0]; ans;', typ='int')
        def fset(self, val):
            if self.in_paged_mode and self.loaded_javascript:
                self.javascript('if (window.paged_display) window.paged_display.scroll_to_column(%d)' % int(val))
                return True
        return property(fget=fget, fset=fset)

    @property
    def page_dimensions(self):
        if self.in_paged_mode:
            return self.javascript(
                '''
                ans = ''
                if (window.paged_display)
                    ans = window.paged_display.col_width + ':' + window.paged_display.current_page_height;
                ans;''', typ='string')

    @property
    def hscroll_fraction(self):
        try:
            return float(self.xpos)/self.width
        except ZeroDivisionError:
            return 0.

    @property
    def height(self):
        # Note that document.body.offsetHeight does not include top and bottom
        # margins on body and in some cases does not include the top margin on
        # the first element inside body either. See ticket #8791 for an
        # example of the latter.
        q = self.mainFrame().contentsSize().height()
        if q < 0:
            # Don't know if this is still needed, but it can't hurt
            j = self.javascript('document.body.offsetHeight', 'int')
            if j >= 0:
                q = j
        return q

    @property
    def width(self):
        return self.mainFrame().contentsSize().width()  # offsetWidth gives inaccurate results

    def set_bottom_padding(self, amount):
        s = QSize(-1, -1) if amount == 0 else QSize(self.viewportSize().width(),
                self.height+amount)
        self.setPreferredContentsSize(s)

    def extract_node(self):
        return unicode(self.mainFrame().evaluateJavaScript(
            'window.calibre_extract.extract()'))

# }}}
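
# A minimal standalone sketch (not used by the viewer) of the flow-mode
# scroll_fraction arithmetic implemented above: the fraction is the scroll
# offset divided by the scrollable range, and maps back the same way.
def _scroll_fraction(ypos, height, window_height):
    # Scrollable range is the document height minus one viewport.
    try:
        return abs(float(ypos) / (height - window_height))
    except ZeroDivisionError:
        return 0.0


def _ypos_for_fraction(val, height, window_height):
    return max(0, val * (height - window_height))

# For a 5000px document in a 1000px window, being 2000px down is half way:
assert _scroll_fraction(2000, 5000, 1000) == 0.5
assert _ypos_for_fraction(0.5, 5000, 1000) == 2000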
class DocumentView(QWebView):  # {{{

    magnification_changed = pyqtSignal(object)
    DISABLED_BRUSH = QBrush(Qt.lightGray, Qt.Dense5Pattern)
    gesture_handler = lambda s, e: False
    last_loaded_path = None

    def initialize_view(self, debug_javascript=False):
        self.setRenderHints(QPainter.Antialiasing|QPainter.TextAntialiasing|QPainter.SmoothPixmapTransform)
        self.flipper = SlideFlip(self)
        self.gesture_handler = GestureHandler(self)
        self.is_auto_repeat_event = False
        self.debug_javascript = debug_javascript
        self.shortcuts = Shortcuts(SHORTCUTS, 'shortcuts/viewer')
        self.setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding))
        self._size_hint = QSize(510, 680)
        self.initial_pos = 0.0
        self.to_bottom = False
        self.document = Document(self.shortcuts, parent=self,
                debug_javascript=debug_javascript)
        self.footnotes = Footnotes(self)
        self.document.settings_changed.connect(self.footnotes.clone_settings)
        self.setPage(self.document)
        self.inspector = WebInspector(self, self.document)
        self.manager = None
        self._reference_mode = False
        self._ignore_scrollbar_signals = False
        self.loading_url = None
        self.loadFinished.connect(self.load_finished)
        self.document.linkClicked.connect(self.link_clicked)
        self.document.linkHovered.connect(self.link_hovered)
        self.document.selectionChanged[()].connect(self.selection_changed)
        self.document.animated_scroll_done_signal.connect(
                self.animated_scroll_done, type=Qt.QueuedConnection)
        self.document.page_turn.connect(self.page_turn_requested)
        copy_action = self.copy_action
        copy_action.setIcon(QIcon(I('edit-copy.png')))
        copy_action.triggered.connect(self.copy, Qt.QueuedConnection)
        d = self.document
        self.unimplemented_actions = list(map(self.pageAction,
            [d.DownloadImageToDisk, d.OpenLinkInNewWindow,
                d.DownloadLinkToDisk, d.OpenImageInNewWindow, d.OpenLink,
                d.Reload, d.InspectElement]))

        self.search_online_action = QAction(QIcon(I('search.png')), '', self)
        self.search_online_action.triggered.connect(self.search_online)
        self.addAction(self.search_online_action)
        self.dictionary_action = QAction(QIcon(I('dictionary.png')),
                _('&Lookup in dictionary'), self)
        self.dictionary_action.triggered.connect(self.lookup)
        self.addAction(self.dictionary_action)
        self.image_popup = ImagePopup(self)
        self.table_popup = TablePopup(self)
        self.view_image_action = QAction(QIcon(I('view-image.png')),
                _('View &image...'), self)
        self.view_image_action.triggered.connect(self.image_popup)
        self.view_table_action = QAction(QIcon(I('view.png')),
                _('View &table...'), self)
        self.view_table_action.triggered.connect(self.popup_table)
        self.search_action = QAction(QIcon(I('dictionary.png')),
                _('&Search for next occurrence'), self)
        self.search_action.triggered.connect(self.search_next)
        self.addAction(self.search_action)

        self.goto_location_action = QAction(_('Go to...'), self)
        self.goto_location_menu = m = QMenu(self)
        self.goto_location_actions = a = {
                'Next Page': self.next_page,
                'Previous Page': self.previous_page,
                'Section Top':
partial(self.scroll_to, 0), 'Document Top': self.goto_document_start, 'Section Bottom':partial(self.scroll_to, 1), 'Document Bottom': self.goto_document_end, 'Next Section': self.goto_next_section, 'Previous Section': self.goto_previous_section, } for name, key in [(_('Next Section'), 'Next Section'), (_('Previous Section'), 'Previous Section'), (None, None), (_('Document Start'), 'Document Top'), (_('Document End'), 'Document Bottom'), (None, None), (_('Section Start'), 'Section Top'), (_('Section End'), 'Section Bottom'), (None, None), (_('Next Page'), 'Next Page'), (_('Previous Page'), 'Previous Page')]: if key is None: m.addSeparator() else: m.addAction(name, a[key], self.shortcuts.get_sequences(key)[0]) self.goto_location_action.setMenu(self.goto_location_menu) self.restore_fonts_action = QAction(_('Default font size'), self) self.restore_fonts_action.setCheckable(True) self.restore_fonts_action.triggered.connect(self.restore_font_size) def goto_next_section(self, *args): if self.manager is not None: self.manager.goto_next_section() def goto_previous_section(self, *args): if self.manager is not None: self.manager.goto_previous_section() def goto_document_start(self, *args): if self.manager is not None: self.manager.goto_start() def goto_document_end(self, *args): if self.manager is not None: self.manager.goto_end() @property def copy_action(self): return self.pageAction(self.document.Copy) def animated_scroll_done(self): if self.manager is not None: self.manager.scrolled(self.document.scroll_fraction) def reference_mode(self, enable): self._reference_mode = enable self.document.reference_mode(enable) def goto(self, ref): self.document.goto(ref) def goto_bookmark(self, bm): self.document.goto_bookmark(bm) def config(self, parent=None): self.document.do_config(parent) if self.document.in_fullscreen_mode: self.document.switch_to_fullscreen_mode() self.setFocus(Qt.OtherFocusReason) def load_theme(self, theme_id): themes = load_themes() theme = themes[theme_id] opts = config(theme).parse() self.document.apply_settings(opts) if self.document.in_fullscreen_mode: self.document.switch_to_fullscreen_mode() self.setFocus(Qt.OtherFocusReason) def bookmark(self): return self.document.bookmark() @property def selected_text(self): return self.document.selectedText().replace(u'\u00ad', u'').strip() def copy(self): self.document.triggerAction(self.document.Copy) c = QApplication.clipboard() md = c.mimeData() if iswindows: nmd = QMimeData() nmd.setHtml(md.html().replace(u'\u00ad', '')) md = nmd md.setText(self.selected_text) QApplication.clipboard().setMimeData(md) def selection_changed(self): if self.manager is not None: self.manager.selection_changed(self.selected_text) def _selectedText(self): t = unicode(self.selectedText()).strip() if not t: return u'' if len(t) > 40: t = t[:40] + u'...' 
t = t.replace(u'&', u'&&') return _("S&earch online for '%s'")%t def popup_table(self): html = self.document.extract_node() self.table_popup(html, QUrl.fromLocalFile(self.last_loaded_path), self.document.font_magnification_step) def contextMenuEvent(self, ev): from_touch = ev.reason() == ev.Other mf = self.document.mainFrame() r = mf.hitTestContent(ev.pos()) img = r.pixmap() elem = r.element() if elem.isNull(): elem = r.enclosingBlockElement() table = None parent = elem while not parent.isNull(): if (unicode(parent.tagName()) == u'table' or unicode(parent.localName()) == u'table'): table = parent break parent = parent.parent() self.image_popup.current_img = img self.image_popup.current_url = r.imageUrl() menu = self.document.createStandardContextMenu() for action in self.unimplemented_actions: menu.removeAction(action) if not img.isNull(): menu.addAction(self.view_image_action) if table is not None: self.document.mark_element.emit(table) menu.addAction(self.view_table_action) text = self._selectedText() if text and img.isNull(): self.search_online_action.setText(text) for x, sc in (('search_online', 'Search online'), ('dictionary', 'Lookup word'), ('search', 'Next occurrence')): ac = getattr(self, '%s_action' % x) menu.addAction(ac.icon(), '%s [%s]' % (unicode(ac.text()), ','.join(self.shortcuts.get_shortcuts(sc))), ac.trigger) if from_touch and self.manager is not None: word = unicode(mf.evaluateJavaScript('window.calibre_utils.word_at_point(%f, %f)' % (ev.pos().x(), ev.pos().y())) or '') if word: menu.addAction(self.dictionary_action.icon(), _('Lookup %s in the dictionary') % word, partial(self.manager.lookup, word)) menu.addAction(self.search_online_action.icon(), _('Search for %s online') % word, partial(self.do_search_online, word)) if not text and img.isNull(): menu.addSeparator() if self.manager.action_back.isEnabled(): menu.addAction(self.manager.action_back) if self.manager.action_forward.isEnabled(): menu.addAction(self.manager.action_forward) menu.addAction(self.goto_location_action) if self.manager is not None: menu.addSeparator() menu.addAction(self.manager.action_table_of_contents) menu.addSeparator() menu.addAction(self.manager.action_font_size_larger) self.restore_fonts_action.setChecked(self.multiplier == 1) menu.addAction(self.restore_fonts_action) menu.addAction(self.manager.action_font_size_smaller) menu.addSeparator() menu.addAction(_('Inspect'), self.inspect) if not text and img.isNull() and self.manager is not None: menu.addSeparator() if (not self.document.show_controls or self.document.in_fullscreen_mode) and self.manager is not None: menu.addAction(self.manager.toggle_toolbar_action) menu.addAction(self.manager.action_full_screen) menu.addSeparator() menu.addAction(self.manager.action_reload) menu.addAction(self.manager.action_quit) for plugin in self.document.all_viewer_plugins: plugin.customize_context_menu(menu, ev, r) if from_touch: from calibre.constants import plugins pi = plugins['progress_indicator'][0] for x in (menu, self.goto_location_menu): if hasattr(pi, 'set_touch_menu_style'): pi.set_touch_menu_style(x) helpt = QAction(QIcon(I('help.png')), _('Show supported touch screen gestures'), menu) helpt.triggered.connect(self.gesture_handler.show_help) menu.insertAction(menu.actions()[0], helpt) else: self.goto_location_menu.setStyle(self.style()) self.context_menu = menu menu.exec_(ev.globalPos()) def inspect(self): self.inspector.show() self.inspector.raise_() self.pageAction(self.document.InspectElement).trigger() def lookup(self, *args): if self.manager 
is not None: t = unicode(self.selectedText()).strip() if t: self.manager.lookup(t.split()[0]) def search_next(self): if self.manager is not None: t = unicode(self.selectedText()).strip() if t: self.manager.search.set_search_string(t) def search_online(self): t = unicode(self.selectedText()).strip() if t: self.do_search_online(t) def do_search_online(self, text): url = self.document.search_online_url.replace('{text}', QUrl().toPercentEncoding(text)) if not isinstance(url, bytes): url = url.encode('utf-8') open_url(QUrl.fromEncoded(url)) def set_manager(self, manager): self.manager = manager self.scrollbar = manager.horizontal_scrollbar self.scrollbar.valueChanged[(int)].connect(self.scroll_horizontally) def scroll_horizontally(self, amount): self.document.scroll_to(y=self.document.ypos, x=amount) @property def scroll_pos(self): return (self.document.ypos, self.document.ypos + self.document.window_height) @property def viewport_rect(self): # (left, top, right, bottom) of the viewport in document co-ordinates # When in paged mode, left and right are the numbers of the columns # at the left edge and *after* the right edge of the viewport d = self.document if d.in_paged_mode: try: l, r = d.column_boundaries except ValueError: l, r = (0, 1) else: l, r = d.xpos, d.xpos + d.window_width return (l, d.ypos, r, d.ypos + d.window_height) def link_hovered(self, link, text, context): link, text = unicode(link), unicode(text) if link: self.setCursor(Qt.PointingHandCursor) else: self.unsetCursor() def link_clicked(self, url): if self.manager is not None: self.manager.link_clicked(url) def sizeHint(self): return self._size_hint @dynamic_property def scroll_fraction(self): def fget(self): return self.document.scroll_fraction def fset(self, val): self.document.scroll_fraction = float(val) return property(fget=fget, fset=fset) @property def hscroll_fraction(self): return self.document.hscroll_fraction @property def content_size(self): return self.document.width, self.document.height @dynamic_property def current_language(self): def fget(self): return self.document.current_language def fset(self, val): self.document.current_language = val return property(fget=fget, fset=fset) def search(self, text, backwards=False): flags = self.document.FindBackward if backwards else self.document.FindFlags(0) found = self.document.findText(text, flags) if found and self.document.in_paged_mode: self.document.javascript('paged_display.snap_to_selection()') return found def path(self): return os.path.abspath(unicode(self.url().toLocalFile())) def load_path(self, path, pos=0.0): self.initial_pos = pos self.last_loaded_path = path # This is needed otherwise percentage margins on body are not correctly # evaluated in read_document_margins() in paged mode. 
self.document.setPreferredContentsSize(QSize()) def callback(lu): self.loading_url = lu if self.manager is not None: self.manager.load_started() load_html(path, self, codec=getattr(path, 'encoding', 'utf-8'), mime_type=getattr(path, 'mime_type', 'text/html'), pre_load_callback=callback) entries = set() for ie in getattr(path, 'index_entries', []): if ie.start_anchor: entries.add(ie.start_anchor) if ie.end_anchor: entries.add(ie.end_anchor) self.document.index_anchors = entries def initialize_scrollbar(self): if getattr(self, 'scrollbar', None) is not None: if self.document.in_paged_mode: self.scrollbar.setVisible(False) return delta = self.document.width - self.size().width() if delta > 0: self._ignore_scrollbar_signals = True self.scrollbar.blockSignals(True) self.scrollbar.setRange(0, delta) self.scrollbar.setValue(0) self.scrollbar.setSingleStep(1) self.scrollbar.setPageStep(int(delta/10.)) self.scrollbar.setVisible(delta > 0) self.scrollbar.blockSignals(False) self._ignore_scrollbar_signals = False def load_finished(self, ok): if self.loading_url is None: # An <iframe> finished loading return self.loading_url = None self.document.load_javascript_libraries() self.document.after_load(self.last_loaded_path) self._size_hint = self.document.mainFrame().contentsSize() scrolled = False if self.to_bottom: self.to_bottom = False self.initial_pos = 1.0 if self.initial_pos > 0.0: scrolled = True self.scroll_to(self.initial_pos, notify=False) self.initial_pos = 0.0 self.update() self.initialize_scrollbar() self.document.reference_mode(self._reference_mode) if self.manager is not None: spine_index = self.manager.load_finished(bool(ok)) if spine_index > -1: self.document.set_reference_prefix('%d.'%(spine_index+1)) if scrolled: self.manager.scrolled(self.document.scroll_fraction, onload=True) if self.flipper.isVisible(): if self.flipper.running: self.flipper.setVisible(False) else: self.flipper(self.current_page_image(), duration=self.document.page_flip_duration) @classmethod def test_line(cls, img, y): 'Test if line contains pixels of exactly the same color' start = img.pixel(0, y) for i in range(1, img.width()): if img.pixel(i, y) != start: return False return True def current_page_image(self, overlap=-1): if overlap < 0: overlap = self.height() img = QImage(self.width(), overlap, QImage.Format_ARGB32_Premultiplied) painter = QPainter(img) painter.setRenderHints(self.renderHints()) self.document.mainFrame().render(painter, QRegion(0, 0, self.width(), overlap)) painter.end() return img def find_next_blank_line(self, overlap): img = self.current_page_image(overlap) for i in range(overlap-1, -1, -1): if self.test_line(img, i): self.scroll_by(y=i, notify=False) return self.scroll_by(y=overlap) def previous_page(self): if self.flipper.running and not self.is_auto_repeat_event: return if self.loading_url is not None: return epf = self.document.enable_page_flip and not self.is_auto_repeat_event if self.document.in_paged_mode: loc = self.document.javascript( 'paged_display.previous_screen_location()', typ='int') if loc < 0: if self.manager is not None: if epf: self.flipper.initialize(self.current_page_image(), forwards=False) self.manager.previous_document() else: if epf: self.flipper.initialize(self.current_page_image(), forwards=False) self.document.scroll_to(x=loc, y=0) if epf: self.flipper(self.current_page_image(), duration=self.document.page_flip_duration) if self.manager is not None: self.manager.scrolled(self.scroll_fraction) return delta_y = self.document.window_height - 25 if 
self.document.at_top: if self.manager is not None: self.to_bottom = True if epf: self.flipper.initialize(self.current_page_image(), False) self.manager.previous_document() else: opos = self.document.ypos upper_limit = opos - delta_y if upper_limit < 0: upper_limit = 0 if upper_limit < opos: if epf: self.flipper.initialize(self.current_page_image(), forwards=False) self.document.scroll_to(self.document.xpos, upper_limit) if epf: self.flipper(self.current_page_image(), duration=self.document.page_flip_duration) if self.manager is not None: self.manager.scrolled(self.scroll_fraction) def next_page(self): if self.flipper.running and not self.is_auto_repeat_event: return if self.loading_url is not None: return epf = self.document.enable_page_flip and not self.is_auto_repeat_event if self.document.in_paged_mode: loc = self.document.javascript( 'paged_display.next_screen_location()', typ='int') if loc < 0: if self.manager is not None: if epf: self.flipper.initialize(self.current_page_image()) self.manager.next_document() else: if epf: self.flipper.initialize(self.current_page_image()) self.document.scroll_to(x=loc, y=0) if epf: self.flipper(self.current_page_image(), duration=self.document.page_flip_duration) if self.manager is not None: self.manager.scrolled(self.scroll_fraction) return window_height = self.document.window_height document_height = self.document.height ddelta = document_height - window_height # print '\nWindow height:', window_height # print 'Document height:', self.document.height delta_y = window_height - 25 if self.document.at_bottom or ddelta <= 0: if self.manager is not None: if epf: self.flipper.initialize(self.current_page_image()) self.manager.next_document() elif ddelta < 25: self.scroll_by(y=ddelta) return else: oopos = self.document.ypos # print 'Original position:', oopos self.document.set_bottom_padding(0) opos = self.document.ypos # print 'After set padding=0:', self.document.ypos if opos < oopos: if self.manager is not None: if epf: self.flipper.initialize(self.current_page_image()) self.manager.next_document() return # oheight = self.document.height lower_limit = opos + delta_y # Max value of top y co-ord after scrolling max_y = self.document.height - window_height # The maximum possible top y co-ord if max_y < lower_limit: padding = lower_limit - max_y if padding == window_height: if self.manager is not None: if epf: self.flipper.initialize(self.current_page_image()) self.manager.next_document() return # print 'Setting padding to:', lower_limit - max_y self.document.set_bottom_padding(lower_limit - max_y) if epf: self.flipper.initialize(self.current_page_image()) # print 'Document height:', self.document.height # print 'Height change:', (self.document.height - oheight) max_y = self.document.height - window_height lower_limit = min(max_y, lower_limit) # print 'Scroll to:', lower_limit if lower_limit > opos: self.document.scroll_to(self.document.xpos, lower_limit) actually_scrolled = self.document.ypos - opos # print 'After scroll pos:', self.document.ypos # print 'Scrolled by:', self.document.ypos - opos self.find_next_blank_line(window_height - actually_scrolled) # print 'After blank line pos:', self.document.ypos if epf: self.flipper(self.current_page_image(), duration=self.document.page_flip_duration) if self.manager is not None: self.manager.scrolled(self.scroll_fraction) # print 'After all:', self.document.ypos def page_turn_requested(self, backwards): if backwards: self.previous_page() else: self.next_page() def scroll_by(self, x=0, y=0, notify=True): 
old_pos = (self.document.xpos if self.document.in_paged_mode else self.document.ypos) self.document.scroll_by(x, y) new_pos = (self.document.xpos if self.document.in_paged_mode else self.document.ypos) if notify and self.manager is not None and new_pos != old_pos: self.manager.scrolled(self.scroll_fraction) def scroll_to(self, pos, notify=True): if self._ignore_scrollbar_signals: return old_pos = (self.document.xpos if self.document.in_paged_mode else self.document.ypos) if self.document.in_paged_mode: if isinstance(pos, basestring): self.document.jump_to_anchor(pos) else: self.document.scroll_fraction = pos else: if isinstance(pos, basestring): self.document.jump_to_anchor(pos) else: if pos >= 1: self.document.scroll_to(0, self.document.height) else: y = int(math.ceil( pos*(self.document.height-self.document.window_height))) self.document.scroll_to(0, y) new_pos = (self.document.xpos if self.document.in_paged_mode else self.document.ypos) if notify and self.manager is not None and new_pos != old_pos: self.manager.scrolled(self.scroll_fraction) @dynamic_property def multiplier(self): def fget(self): return self.zoomFactor() def fset(self, val): oval = self.zoomFactor() self.setZoomFactor(val) if val != oval: if self.document.in_paged_mode: self.document.update_contents_size_for_paged_mode() self.magnification_changed.emit(val) return property(fget=fget, fset=fset) def magnify_fonts(self, amount=None): if amount is None: amount = self.document.font_magnification_step with self.document.page_position: self.multiplier += amount return self.document.scroll_fraction def shrink_fonts(self, amount=None): if amount is None: amount = self.document.font_magnification_step if self.multiplier >= amount: with self.document.page_position: self.multiplier -= amount return self.document.scroll_fraction def restore_font_size(self): with self.document.page_position: self.multiplier = 1 return self.document.scroll_fraction def changeEvent(self, event): if event.type() == event.EnabledChange: self.update() return QWebView.changeEvent(self, event) def paintEvent(self, event): painter = QPainter(self) painter.setRenderHints(self.renderHints()) self.document.mainFrame().render(painter, event.region()) if not self.isEnabled(): painter.fillRect(event.region().boundingRect(), self.DISABLED_BRUSH) painter.end() def wheelEvent(self, event): if event.phase() not in (Qt.ScrollUpdate, 0): # 0 is Qt.NoScrollPhase which is not yet available in PyQt return mods = event.modifiers() num_degrees = event.angleDelta().y() // 8 if mods & Qt.CTRL: if self.manager is not None and num_degrees != 0: (self.manager.font_size_larger if num_degrees > 0 else self.manager.font_size_smaller)() return if self.document.in_paged_mode: if abs(num_degrees) < 15: return typ = 'screen' if self.document.wheel_flips_pages else 'col' direction = 'next' if num_degrees < 0 else 'previous' loc = self.document.javascript('paged_display.%s_%s_location()'%( direction, typ), typ='int') if loc > -1: self.document.scroll_to(x=loc, y=0) if self.manager is not None: self.manager.scrolled(self.scroll_fraction) event.accept() elif self.manager is not None: if direction == 'next': self.manager.next_document() else: self.manager.previous_document() event.accept() return if num_degrees < -14: if self.document.wheel_flips_pages: self.next_page() event.accept() return if self.document.at_bottom: self.scroll_by(y=15) # at_bottom can lie on windows if self.manager is not None: self.manager.next_document() event.accept() return elif num_degrees > 14: if 
self.document.wheel_flips_pages: self.previous_page() event.accept() return if self.document.at_top: if self.manager is not None: self.manager.previous_document() event.accept() return ret = QWebView.wheelEvent(self, event) num_degrees_h = event.angleDelta().x() // 8 vertical = abs(num_degrees) > abs(num_degrees_h) scroll_amount = ((num_degrees if vertical else num_degrees_h)/ 120.0) * .2 * -1 * 8 dim = self.document.viewportSize().height() if vertical else self.document.viewportSize().width() amt = dim * scroll_amount mult = -1 if amt < 0 else 1 if self.document.wheel_scroll_fraction != 100: amt = mult * max(1, abs(int(amt * self.document.wheel_scroll_fraction / 100.))) self.scroll_by(0, amt) if vertical else self.scroll_by(amt, 0) if self.manager is not None: self.manager.scrolled(self.scroll_fraction) return ret def keyPressEvent(self, event): if not self.handle_key_press(event): return QWebView.keyPressEvent(self, event) def paged_col_scroll(self, forward=True, scroll_past_end=True): dir = 'next' if forward else 'previous' loc = self.document.javascript( 'paged_display.%s_col_location()'%dir, typ='int') if loc > -1: self.document.scroll_to(x=loc, y=0) self.manager.scrolled(self.document.scroll_fraction) elif scroll_past_end: (self.manager.next_document() if forward else self.manager.previous_document()) def handle_key_press(self, event): handled = True key = self.shortcuts.get_match(event) func = self.goto_location_actions.get(key, None) if func is not None: self.is_auto_repeat_event = event.isAutoRepeat() try: func() finally: self.is_auto_repeat_event = False elif key == 'Down': if self.document.in_paged_mode: self.paged_col_scroll(scroll_past_end=not self.document.line_scrolling_stops_on_pagebreaks) else: if (not self.document.line_scrolling_stops_on_pagebreaks and self.document.at_bottom): self.manager.next_document() else: amt = int((self.document.line_scroll_fraction / 100.) * 15) self.scroll_by(y=amt) elif key == 'Up': if self.document.in_paged_mode: self.paged_col_scroll(forward=False, scroll_past_end=not self.document.line_scrolling_stops_on_pagebreaks) else: if (not self.document.line_scrolling_stops_on_pagebreaks and self.document.at_top): self.manager.previous_document() else: amt = int((self.document.line_scroll_fraction / 100.) * 15) self.scroll_by(y=-amt) elif key == 'Left': if self.document.in_paged_mode: self.paged_col_scroll(forward=False) else: amt = int((self.document.line_scroll_fraction / 100.) * 15) self.scroll_by(x=-amt) elif key == 'Right': if self.document.in_paged_mode: self.paged_col_scroll() else: amt = int((self.document.line_scroll_fraction / 100.) 
* 15) self.scroll_by(x=amt) elif key == 'Back': if self.manager is not None: self.manager.back(None) elif key == 'Forward': if self.manager is not None: self.manager.forward(None) elif event.matches(QKeySequence.Copy): self.copy() else: handled = False return handled def resizeEvent(self, event): if self.manager is not None: self.manager.viewport_resize_started(event) return QWebView.resizeEvent(self, event) def event(self, ev): if self.gesture_handler(ev): return True return QWebView.event(self, ev) def mouseMoveEvent(self, ev): if self.document.in_paged_mode and ev.buttons() & Qt.LeftButton and not self.rect().contains(ev.pos(), True): # Prevent this event from causing WebKit to scroll the viewport # See https://bugs.launchpad.net/bugs/1464862 return return QWebView.mouseMoveEvent(self, ev) def mouseReleaseEvent(self, ev): r = self.document.mainFrame().hitTestContent(ev.pos()) a, url = r.linkElement(), r.linkUrl() if url.isValid() and not a.isNull() and self.manager is not None: fd = self.footnotes.get_footnote_data(a, url) if fd: self.footnotes.show_footnote(fd) self.manager.show_footnote_view() ev.accept() return opos = self.document.ypos if self.manager is not None: prev_pos = self.manager.update_page_number() ret = QWebView.mouseReleaseEvent(self, ev) if self.manager is not None and opos != self.document.ypos: self.manager.scrolled(self.scroll_fraction) self.manager.internal_link_clicked(prev_pos) return ret def follow_footnote_link(self): qurl = self.footnotes.showing_url if qurl and qurl.isValid(): self.link_clicked(qurl) # }}}
./CrossVul/dataset_final_sorted/CWE-264/py/good_4833_1
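The viewer code in the record above ends with find_next_blank_line(), which renders the page to an image and scans upward for a fully uniform pixel row, so a partial scroll lands on white space rather than mid-line of text. Below is a minimal, Qt-free sketch of that scan; the list-of-rows pixel representation and the function names are illustrative stand-ins for QImage and current_page_image(), not calibre's API.

# Minimal, Qt-free sketch of the blank-line scan used by
# find_next_blank_line() above: given a page rendered as rows of
# pixel values, walk upward from the overlap region and stop at the
# first row whose pixels are all identical (a "blank" line).

def is_blank_row(row):
    # A row is blank when every pixel matches the first one.
    first = row[0]
    return all(px == first for px in row)

def next_blank_line_offset(rows, overlap):
    # Scan from the bottom of the overlap upward, mirroring the
    # range(overlap - 1, -1, -1) loop in the original code.
    for y in range(min(overlap, len(rows)) - 1, -1, -1):
        if is_blank_row(rows[y]):
            return y
    return overlap  # no blank row found: scroll by the full overlap

if __name__ == '__main__':
    page = [[1, 1, 1], [1, 2, 1], [3, 3, 3], [4, 5, 6]]
    print(next_blank_line_offset(page, 4))  # -> 2 (the [3, 3, 3] row)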
crossvul-python_data_good_3633_2
# Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The security groups extension.""" import urllib from webob import exc import webob from nova import compute from nova import db from nova import exception from nova import flags from nova import log as logging from nova import rpc from nova import utils from nova import quota from nova.api.openstack import common from nova.api.openstack import extensions from nova.api.openstack import wsgi from nova.compute import power_state from xml.dom import minidom LOG = logging.getLogger("nova.api.contrib.security_groups") FLAGS = flags.FLAGS class SecurityGroupController(object): """The Security group API controller for the OpenStack API.""" def __init__(self): self.compute_api = compute.API() super(SecurityGroupController, self).__init__() def _format_security_group_rule(self, context, rule): sg_rule = {} sg_rule['id'] = rule.id sg_rule['parent_group_id'] = rule.parent_group_id sg_rule['ip_protocol'] = rule.protocol sg_rule['from_port'] = rule.from_port sg_rule['to_port'] = rule.to_port sg_rule['group'] = {} sg_rule['ip_range'] = {} if rule.group_id: source_group = db.security_group_get(context, rule.group_id) sg_rule['group'] = {'name': source_group.name, 'tenant_id': source_group.project_id} else: sg_rule['ip_range'] = {'cidr': rule.cidr} return sg_rule def _format_security_group(self, context, group): security_group = {} security_group['id'] = group.id security_group['description'] = group.description security_group['name'] = group.name security_group['tenant_id'] = group.project_id security_group['rules'] = [] for rule in group.rules: security_group['rules'] += [self._format_security_group_rule( context, rule)] return security_group def _get_security_group(self, context, id): try: id = int(id) security_group = db.security_group_get(context, id) except ValueError: msg = _("Security group id should be integer") raise exc.HTTPBadRequest(explanation=msg) except exception.NotFound as exp: raise exc.HTTPNotFound(explanation=unicode(exp)) return security_group def show(self, req, id): """Return data about the given security group.""" context = req.environ['nova.context'] security_group = self._get_security_group(context, id) return {'security_group': self._format_security_group(context, security_group)} def delete(self, req, id): """Delete a security group.""" context = req.environ['nova.context'] security_group = self._get_security_group(context, id) LOG.audit(_("Delete security group %s"), id, context=context) db.security_group_destroy(context, security_group.id) return exc.HTTPAccepted() def index(self, req): """Returns a list of security groups""" context = req.environ['nova.context'] self.compute_api.ensure_default_security_group(context) groups = db.security_group_get_by_project(context, context.project_id) limited_list = common.limited(groups, req) result = [self._format_security_group(context, group) for group in limited_list] return {'security_groups': list(sorted(result, key=lambda k: (k['tenant_id'], 
k['name'])))} def create(self, req, body): """Creates a new security group.""" context = req.environ['nova.context'] if not body: return exc.HTTPUnprocessableEntity() security_group = body.get('security_group', None) if security_group is None: return exc.HTTPUnprocessableEntity() group_name = security_group.get('name', None) group_description = security_group.get('description', None) self._validate_security_group_property(group_name, "name") self._validate_security_group_property(group_description, "description") group_name = group_name.strip() group_description = group_description.strip() if quota.allowed_security_groups(context, 1) < 1: msg = _("Quota exceeded, too many security groups.") raise exc.HTTPBadRequest(explanation=msg) LOG.audit(_("Create Security Group %s"), group_name, context=context) self.compute_api.ensure_default_security_group(context) if db.security_group_exists(context, context.project_id, group_name): msg = _('Security group %s already exists') % group_name raise exc.HTTPBadRequest(explanation=msg) group = {'user_id': context.user_id, 'project_id': context.project_id, 'name': group_name, 'description': group_description} group_ref = db.security_group_create(context, group) return {'security_group': self._format_security_group(context, group_ref)} def _validate_security_group_property(self, value, typ): """ typ will be either 'name' or 'description', depending on the caller """ try: val = value.strip() except AttributeError: msg = _("Security group %s is not a string or unicode") % typ raise exc.HTTPBadRequest(explanation=msg) if not val: msg = _("Security group %s cannot be empty.") % typ raise exc.HTTPBadRequest(explanation=msg) if len(val) > 255: msg = _("Security group %s should not be greater " "than 255 characters.") % typ raise exc.HTTPBadRequest(explanation=msg) class SecurityGroupRulesController(SecurityGroupController): def create(self, req, body): context = req.environ['nova.context'] if not body: raise exc.HTTPUnprocessableEntity() if not 'security_group_rule' in body: raise exc.HTTPUnprocessableEntity() self.compute_api.ensure_default_security_group(context) sg_rule = body['security_group_rule'] parent_group_id = sg_rule.get('parent_group_id', None) try: parent_group_id = int(parent_group_id) security_group = db.security_group_get(context, parent_group_id) except ValueError: msg = _("Parent group id is not integer") return exc.HTTPBadRequest(explanation=msg) except exception.NotFound as exp: msg = _("Security group (%s) not found") % parent_group_id return exc.HTTPNotFound(explanation=msg) msg = _("Authorize security group ingress %s") LOG.audit(msg, security_group['name'], context=context) try: values = self._rule_args_to_dict(context, to_port=sg_rule.get('to_port'), from_port=sg_rule.get('from_port'), parent_group_id=sg_rule.get('parent_group_id'), ip_protocol=sg_rule.get('ip_protocol'), cidr=sg_rule.get('cidr'), group_id=sg_rule.get('group_id')) except Exception as exp: raise exc.HTTPBadRequest(explanation=unicode(exp)) if values is None: msg = _("Not enough parameters to build a " "valid rule.") raise exc.HTTPBadRequest(explanation=msg) values['parent_group_id'] = security_group.id if self._security_group_rule_exists(security_group, values): msg = _('This rule already exists in group %s') % parent_group_id raise exc.HTTPBadRequest(explanation=msg) allowed = quota.allowed_security_group_rules(context, parent_group_id, 1) if allowed < 1: msg = _("Quota exceeded, too many security group rules.") raise exc.HTTPBadRequest(explanation=msg) 
security_group_rule = db.security_group_rule_create(context, values) self.compute_api.trigger_security_group_rules_refresh(context, security_group_id=security_group['id']) return {"security_group_rule": self._format_security_group_rule( context, security_group_rule)} def _security_group_rule_exists(self, security_group, values): """Indicates whether the specified rule values are already defined in the given security group. """ for rule in security_group.rules: if 'group_id' in values: if rule['group_id'] == values['group_id']: return True else: is_duplicate = True for key in ('cidr', 'from_port', 'to_port', 'protocol'): if rule[key] != values[key]: is_duplicate = False break if is_duplicate: return True return False def _rule_args_to_dict(self, context, to_port=None, from_port=None, parent_group_id=None, ip_protocol=None, cidr=None, group_id=None): values = {} if group_id: try: parent_group_id = int(parent_group_id) group_id = int(group_id) except ValueError: msg = _("Parent or group id is not integer") raise exception.InvalidInput(reason=msg) if parent_group_id == group_id: msg = _("Parent group id and group id cannot be same") raise exception.InvalidInput(reason=msg) values['group_id'] = group_id #check if groupId exists db.security_group_get(context, group_id) elif cidr: # If this fails, it throws an exception. This is what we want. try: cidr = urllib.unquote(cidr).decode() except Exception: raise exception.InvalidCidr(cidr=cidr) if not utils.is_valid_cidr(cidr): # Raise exception for non-valid address raise exception.InvalidCidr(cidr=cidr) values['cidr'] = cidr else: values['cidr'] = '0.0.0.0/0' if ip_protocol and from_port and to_port: ip_protocol = str(ip_protocol) try: from_port = int(from_port) to_port = int(to_port) except ValueError: if ip_protocol.upper() == 'ICMP': raise exception.InvalidInput(reason="Type and" " Code must be integers for ICMP protocol type") else: raise exception.InvalidInput(reason="To and From ports " "must be integers") if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']: raise exception.InvalidIpProtocol(protocol=ip_protocol) # Verify that from_port must always be less than # or equal to to_port if from_port > to_port: raise exception.InvalidPortRange(from_port=from_port, to_port=to_port, msg="Former value cannot" " be greater than the later") # Verify valid TCP, UDP port ranges if (ip_protocol.upper() in ['TCP', 'UDP'] and (from_port < 1 or to_port > 65535)): raise exception.InvalidPortRange(from_port=from_port, to_port=to_port, msg="Valid TCP ports should" " be between 1-65535") # Verify ICMP type and code if (ip_protocol.upper() == "ICMP" and (from_port < -1 or to_port > 255)): raise exception.InvalidPortRange(from_port=from_port, to_port=to_port, msg="For ICMP, the" " type:code must be valid") values['protocol'] = ip_protocol values['from_port'] = from_port values['to_port'] = to_port else: # If cidr based filtering, protocol and ports are mandatory if 'cidr' in values: return None return values def delete(self, req, id): context = req.environ['nova.context'] self.compute_api.ensure_default_security_group(context) try: id = int(id) rule = db.security_group_rule_get(context, id) except ValueError: msg = _("Rule id is not integer") return exc.HTTPBadRequest(explanation=msg) except exception.NotFound as exp: msg = _("Rule (%s) not found") % id return exc.HTTPNotFound(explanation=msg) group_id = rule.parent_group_id self.compute_api.ensure_default_security_group(context) security_group = db.security_group_get(context, group_id) msg = _("Revoke security 
group ingress %s") LOG.audit(msg, security_group['name'], context=context) db.security_group_rule_destroy(context, rule['id']) self.compute_api.trigger_security_group_rules_refresh(context, security_group_id=security_group['id']) return exc.HTTPAccepted() class Security_groups(extensions.ExtensionDescriptor): def __init__(self): self.compute_api = compute.API() super(Security_groups, self).__init__() def get_name(self): return "SecurityGroups" def get_alias(self): return "security_groups" def get_description(self): return "Security group support" def get_namespace(self): return "http://docs.openstack.org/ext/securitygroups/api/v1.1" def get_updated(self): return "2011-07-21T00:00:00+00:00" def _addSecurityGroup(self, input_dict, req, instance_id): context = req.environ['nova.context'] try: body = input_dict['addSecurityGroup'] group_name = body['name'] instance_id = int(instance_id) except ValueError: msg = _("Server id should be integer") raise exc.HTTPBadRequest(explanation=msg) except TypeError: msg = _("Missing parameter dict") raise webob.exc.HTTPBadRequest(explanation=msg) except KeyError: msg = _("Security group not specified") raise webob.exc.HTTPBadRequest(explanation=msg) if not group_name or group_name.strip() == '': msg = _("Security group name cannot be empty") raise webob.exc.HTTPBadRequest(explanation=msg) try: self.compute_api.add_security_group(context, instance_id, group_name) except exception.SecurityGroupNotFound as exp: return exc.HTTPNotFound(explanation=unicode(exp)) except exception.InstanceNotFound as exp: return exc.HTTPNotFound(explanation=unicode(exp)) except exception.Invalid as exp: return exc.HTTPBadRequest(explanation=unicode(exp)) return exc.HTTPAccepted() def _removeSecurityGroup(self, input_dict, req, instance_id): context = req.environ['nova.context'] try: body = input_dict['removeSecurityGroup'] group_name = body['name'] instance_id = int(instance_id) except ValueError: msg = _("Server id should be integer") raise exc.HTTPBadRequest(explanation=msg) except TypeError: msg = _("Missing parameter dict") raise webob.exc.HTTPBadRequest(explanation=msg) except KeyError: msg = _("Security group not specified") raise webob.exc.HTTPBadRequest(explanation=msg) if not group_name or group_name.strip() == '': msg = _("Security group name cannot be empty") raise webob.exc.HTTPBadRequest(explanation=msg) try: self.compute_api.remove_security_group(context, instance_id, group_name) except exception.SecurityGroupNotFound as exp: return exc.HTTPNotFound(explanation=unicode(exp)) except exception.InstanceNotFound as exp: return exc.HTTPNotFound(explanation=unicode(exp)) except exception.Invalid as exp: return exc.HTTPBadRequest(explanation=unicode(exp)) return exc.HTTPAccepted() def get_actions(self): """Return the actions the extensions adds""" actions = [ extensions.ActionExtension("servers", "addSecurityGroup", self._addSecurityGroup), extensions.ActionExtension("servers", "removeSecurityGroup", self._removeSecurityGroup) ] return actions def get_resources(self): resources = [] metadata = _get_metadata() body_serializers = { 'application/xml': wsgi.XMLDictSerializer(metadata=metadata, xmlns=wsgi.XMLNS_V11), } serializer = wsgi.ResponseSerializer(body_serializers, None) body_deserializers = { 'application/xml': SecurityGroupXMLDeserializer(), } deserializer = wsgi.RequestDeserializer(body_deserializers) res = extensions.ResourceExtension('os-security-groups', controller=SecurityGroupController(), deserializer=deserializer, serializer=serializer) resources.append(res) 
body_deserializers = { 'application/xml': SecurityGroupRulesXMLDeserializer(), } deserializer = wsgi.RequestDeserializer(body_deserializers) res = extensions.ResourceExtension('os-security-group-rules', controller=SecurityGroupRulesController(), deserializer=deserializer, serializer=serializer) resources.append(res) return resources class SecurityGroupXMLDeserializer(wsgi.MetadataXMLDeserializer): """ Deserializer to handle xml-formatted security group requests. """ def create(self, string): """Deserialize an xml-formatted security group create request""" dom = minidom.parseString(string) security_group = {} sg_node = self.find_first_child_named(dom, 'security_group') if sg_node is not None: if sg_node.hasAttribute('name'): security_group['name'] = sg_node.getAttribute('name') desc_node = self.find_first_child_named(sg_node, "description") if desc_node: security_group['description'] = self.extract_text(desc_node) return {'body': {'security_group': security_group}} class SecurityGroupRulesXMLDeserializer(wsgi.MetadataXMLDeserializer): """ Deserializer to handle xml-formatted security group requests. """ def create(self, string): """Deserialize an xml-formatted security group create request""" dom = minidom.parseString(string) security_group_rule = self._extract_security_group_rule(dom) return {'body': {'security_group_rule': security_group_rule}} def _extract_security_group_rule(self, node): """Marshal the security group rule attribute of a parsed request""" sg_rule = {} sg_rule_node = self.find_first_child_named(node, 'security_group_rule') if sg_rule_node is not None: ip_protocol_node = self.find_first_child_named(sg_rule_node, "ip_protocol") if ip_protocol_node is not None: sg_rule['ip_protocol'] = self.extract_text(ip_protocol_node) from_port_node = self.find_first_child_named(sg_rule_node, "from_port") if from_port_node is not None: sg_rule['from_port'] = self.extract_text(from_port_node) to_port_node = self.find_first_child_named(sg_rule_node, "to_port") if to_port_node is not None: sg_rule['to_port'] = self.extract_text(to_port_node) parent_group_id_node = self.find_first_child_named(sg_rule_node, "parent_group_id") if parent_group_id_node is not None: sg_rule['parent_group_id'] = self.extract_text( parent_group_id_node) group_id_node = self.find_first_child_named(sg_rule_node, "group_id") if group_id_node is not None: sg_rule['group_id'] = self.extract_text(group_id_node) cidr_node = self.find_first_child_named(sg_rule_node, "cidr") if cidr_node is not None: sg_rule['cidr'] = self.extract_text(cidr_node) return sg_rule def _get_metadata(): metadata = { "attributes": { "security_group": ["id", "tenant_id", "name"], "rule": ["id", "parent_group_id"], "security_group_rule": ["id", "parent_group_id"], } } return metadata
./CrossVul/dataset_final_sorted/CWE-264/py/good_3633_2
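The _rule_args_to_dict() method in the record above enforces protocol and port-range invariants before a security group rule is persisted. Below is a standalone sketch of just those range checks, assuming plain ValueError for signalling instead of nova's exception classes; the function name is illustrative.

# Standalone sketch of the port/protocol sanity checks performed in
# _rule_args_to_dict() above; names here are illustrative, not nova's.

def validate_port_range(ip_protocol, from_port, to_port):
    protocol = str(ip_protocol).upper()
    if protocol not in ('TCP', 'UDP', 'ICMP'):
        raise ValueError('unsupported IP protocol: %s' % ip_protocol)
    try:
        from_port, to_port = int(from_port), int(to_port)
    except (TypeError, ValueError):
        raise ValueError('ports must be integers')
    # from_port must never exceed to_port, whatever the protocol
    if from_port > to_port:
        raise ValueError('from_port cannot exceed to_port')
    if protocol in ('TCP', 'UDP') and (from_port < 1 or to_port > 65535):
        raise ValueError('TCP/UDP ports must fall in 1-65535')
    if protocol == 'ICMP' and (from_port < -1 or to_port > 255):
        raise ValueError('ICMP type:code must fall in -1..255')
    return protocol, from_port, to_port

print(validate_port_range('tcp', '22', '22'))  # -> ('TCP', 22, 22)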
crossvul-python_data_bad_5235_0
# # Limited command Shell (lshell) # # Copyright (C) 2008-2013 Ignace Mouzannar (ghantoos) <ghantoos@ghantoos.org> # # This file is part of lshell # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import sys import re import os # import lshell specifics from lshell import utils def warn_count(messagetype, command, conf, strict=None, ssh=None): """ Update the warning_counter, log and display a warning to the user """ log = conf['logpath'] if not ssh: if strict: conf['warning_counter'] -= 1 if conf['warning_counter'] < 0: log.critical('*** forbidden %s -> "%s"' % (messagetype, command)) log.critical('*** Kicked out') sys.exit(1) else: log.critical('*** forbidden %s -> "%s"' % (messagetype, command)) sys.stderr.write('*** You have %s warning(s) left,' ' before getting kicked out.\n' % conf['warning_counter']) log.error('*** User warned, counter: %s' % conf['warning_counter']) sys.stderr.write('This incident has been reported.\n') else: if not conf['quiet']: log.critical('*** forbidden %s: %s' % (messagetype, command)) # if you are here, means that you did something wrong. Return 1. return 1, conf def check_path(line, conf, completion=None, ssh=None, strict=None): """ Check if a path is entered in the line. If so, it checks if user are allowed to see this path. If user is not allowed, it calls warn_count. In case of completion, it only returns 0 or 1. """ allowed_path_re = str(conf['path'][0]) denied_path_re = str(conf['path'][1][:-1]) # split line depending on the operators sep = re.compile(r'\ |;|\||&') line = line.strip() line = sep.split(line) for item in line: # remove potential quotes or back-ticks item = re.sub(r'^["\'`]|["\'`]$', '', item) # remove potential $(), ${}, `` item = re.sub(r'^\$[\(\{]|[\)\}]$', '', item) # if item has been converted to something other than a string # or an int, reconvert it to a string if type(item) not in ['str', 'int']: item = str(item) # replace "~" with home path item = os.path.expanduser(item) # expand shell wildcards using "echo" # i know, this a bit nasty... 
if re.findall('\$|\*|\?', item): # remove quotes if available item = re.sub("\"|\'", "", item) import subprocess p = subprocess.Popen("`which echo` %s" % item, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) cout = p.stdout try: item = cout.readlines()[0].decode('utf8').split(' ')[0] item = item.strip() item = os.path.expandvars(item) except IndexError: conf['logpath'].critical('*** Internal error: command not ' 'executed') return 1, conf tomatch = os.path.realpath(item) if os.path.isdir(tomatch) and tomatch[-1] != '/': tomatch += '/' match_allowed = re.findall(allowed_path_re, tomatch) if denied_path_re: match_denied = re.findall(denied_path_re, tomatch) else: match_denied = None # if path not allowed # case path executed: warn, and return 1 # case completion: return 1 if not match_allowed or match_denied: if not completion: ret, conf = warn_count('path', tomatch, conf, strict=strict, ssh=ssh) return 1, conf if not completion: if not re.findall(allowed_path_re, os.getcwd() + '/'): ret, conf = warn_count('path', tomatch, conf, strict=strict, ssh=ssh) os.chdir(conf['home_path']) conf['promptprint'] = utils.updateprompt(os.getcwd(), conf) return 1, conf return 0, conf def check_secure(line, conf, strict=None, ssh=None): """This method is used to check the content on the typed command. Its purpose is to forbid the user to user to override the lshell command restrictions. The forbidden characters are placed in the 'forbidden' variable. Feel free to update the list. Emptying it would be quite useless..: ) A warning counter has been added, to kick out of lshell a user if he is warned more than X time (X being the 'warning_counter' variable). """ # store original string oline = line # strip all spaces/tabs line = line.strip() # init return code returncode = 0 # This logic is kept crudely simple on purpose. # At most we might match the same stanza twice # (for e.g. 
"'a'", 'a') but the converse would # require detecting single quotation stanzas # nested within double quotes and vice versa relist = re.findall(r'[^=]\"(.+)\"', line) relist2 = re.findall(r'[^=]\'(.+)\'', line) relist = relist + relist2 for item in relist: if os.path.exists(item): ret_check_path, conf = check_path(item, conf, strict=strict) returncode += ret_check_path # ignore quoted text line = re.sub(r'\"(.+?)\"', '', line) line = re.sub(r'\'(.+?)\'', '', line) if re.findall('[:cntrl:].*\n', line): ret, conf = warn_count('syntax', oline, conf, strict=strict, ssh=ssh) return ret, conf for item in conf['forbidden']: # allow '&&' and '||' even if singles are forbidden if item in ['&', '|']: if re.findall("[^\%s]\%s[^\%s]" % (item, item, item), line): ret, conf = warn_count('syntax', oline, conf, strict=strict, ssh=ssh) return ret, conf else: if item in line: ret, conf = warn_count('syntax', oline, conf, strict=strict, ssh=ssh) return ret, conf # check if the line contains $(foo) executions, and check them executions = re.findall('\$\([^)]+[)]', line) for item in executions: # recurse on check_path ret_check_path, conf = check_path(item[2:-1].strip(), conf, strict=strict) returncode += ret_check_path # recurse on check_secure ret_check_secure, conf = check_secure(item[2:-1].strip(), conf, strict=strict) returncode += ret_check_secure # check for executions using back quotes '`' executions = re.findall('\`[^`]+[`]', line) for item in executions: ret_check_secure, conf = check_secure(item[1:-1].strip(), conf, strict=strict) returncode += ret_check_secure # check if the line contains ${foo=bar}, and check them curly = re.findall('\$\{[^}]+[}]', line) for item in curly: # split to get variable only, and remove last character "}" if re.findall(r'=|\+|\?|\-', item): variable = re.split('=|\+|\?|\-', item, 1) else: variable = item ret_check_path, conf = check_path(variable[1][:-1], conf, strict=strict) returncode += ret_check_path # if unknown commands where found, return 1 and don't execute the line if returncode > 0: return 1, conf # in case the $(foo) or `foo` command passed the above tests elif line.startswith('$(') or line.startswith('`'): return 0, conf # in case ';', '|' or '&' are not forbidden, check if in line lines = [] # corrected by Alojzij Blatnik #48 # test first character if line[0] in ["&", "|", ";"]: start = 1 else: start = 0 # split remaining command line for i in range(1, len(line)): # in case \& or \| or \; don't split it if line[i] in ["&", "|", ";"] and line[i - 1] != "\\": # if there is more && or || skip it if start != i: lines.append(line[start:i]) start = i + 1 # append remaining command line if start != len(line): lines.append(line[start:len(line)]) # remove trailing parenthesis line = re.sub('\)$', '', line) for separate_line in lines: separate_line = " ".join(separate_line.split()) splitcmd = separate_line.strip().split(' ') command = splitcmd[0] if len(splitcmd) > 1: cmdargs = splitcmd else: cmdargs = None # in case of a sudo command, check in sudo_commands list if allowed if command == 'sudo': if type(cmdargs) == list: # allow the -u (user) flag if cmdargs[1] == '-u' and cmdargs: sudocmd = cmdargs[3] else: sudocmd = cmdargs[1] if sudocmd not in conf['sudo_commands'] and cmdargs: ret, conf = warn_count('sudo command', oline, conf, strict=strict, ssh=ssh) return ret, conf # if over SSH, replaced allowed list with the one of overssh if ssh: conf['allowed'] = conf['overssh'] # for all other commands check in allowed list if command not in conf['allowed'] and command: 
                ret, conf = warn_count('command', command, conf,
                                       strict=strict, ssh=ssh)
                return ret, conf

    return 0, conf
./CrossVul/dataset_final_sorted/CWE-264/py/bad_5235_0
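check_secure() in the lshell record above blanks out quoted stanzas first, so characters inside quotes do not trigger a warning, and then scans the remainder for forbidden tokens, tolerating the doubled '&&'/'||' forms when the single characters are banned. A condensed sketch of that scan follows, under those assumptions; the function name is illustrative.

import re

# Rough sketch of the forbidden-token scan in check_secure() above.

def has_forbidden_token(line, forbidden):
    # Ignore quoted text, as the original does before matching.
    line = re.sub(r'"(.+?)"', '', line)
    line = re.sub(r"'(.+?)'", '', line)
    for item in forbidden:
        if item in ('&', '|'):
            # Flag a lone '&'/'|' but not the doubled '&&'/'||' form.
            if re.findall(r'[^\%s]\%s[^\%s]' % (item, item, item), line):
                return True
        elif item in line:
            return True
    return False

print(has_forbidden_token('ls ; rm -rf /', [';', '&', '|']))        # True
print(has_forbidden_token('make && make install', [';', '&', '|']))  # False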
crossvul-python_data_bad_3772_1
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010-2011 OpenStack, LLC # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Stubouts, mocks and fixtures for the test suite""" import os try: import sendfile SENDFILE_SUPPORTED = True except ImportError: SENDFILE_SUPPORTED = False import routes import webob from glance.api.middleware import context from glance.api.v1 import router import glance.common.client from glance.registry.api import v1 as rserver from glance.tests import utils VERBOSE = False DEBUG = False class FakeRegistryConnection(object): def __init__(self, *args, **kwargs): pass def connect(self): return True def close(self): return True def request(self, method, url, body=None, headers=None): self.req = webob.Request.blank("/" + url.lstrip("/")) self.req.method = method if headers: self.req.headers = headers if body: self.req.body = body def getresponse(self): mapper = routes.Mapper() api = context.UnauthenticatedContextMiddleware(rserver.API(mapper)) webob_res = self.req.get_response(api) return utils.FakeHTTPResponse(status=webob_res.status_int, headers=webob_res.headers, data=webob_res.body) def stub_out_registry_and_store_server(stubs, base_dir): """ Mocks calls to 127.0.0.1 on 9191 and 9292 for testing so that a real Glance server does not need to be up and running """ class FakeSocket(object): def __init__(self, *args, **kwargs): pass def fileno(self): return 42 class FakeSendFile(object): def __init__(self, req): self.req = req def sendfile(self, o, i, offset, nbytes): os.lseek(i, offset, os.SEEK_SET) prev_len = len(self.req.body) self.req.body += os.read(i, nbytes) return len(self.req.body) - prev_len class FakeGlanceConnection(object): def __init__(self, *args, **kwargs): self.sock = FakeSocket() self.stub_force_sendfile = kwargs.get('stub_force_sendfile', SENDFILE_SUPPORTED) def connect(self): return True def close(self): return True def _clean_url(self, url): #TODO(bcwaldon): Fix the hack that strips off v1 return url.replace('/v1', '', 1) if url.startswith('/v1') else url def putrequest(self, method, url): self.req = webob.Request.blank(self._clean_url(url)) if self.stub_force_sendfile: fake_sendfile = FakeSendFile(self.req) stubs.Set(sendfile, 'sendfile', fake_sendfile.sendfile) self.req.method = method def putheader(self, key, value): self.req.headers[key] = value def endheaders(self): hl = [i.lower() for i in self.req.headers.keys()] assert not ('content-length' in hl and 'transfer-encoding' in hl), \ 'Content-Length and Transfer-Encoding are mutually exclusive' def send(self, data): # send() is called during chunked-transfer encoding, and # data is of the form %x\r\n%s\r\n. Strip off the %x and # only write the actual data in tests. 
self.req.body += data.split("\r\n")[1] def request(self, method, url, body=None, headers=None): self.req = webob.Request.blank(self._clean_url(url)) self.req.method = method if headers: self.req.headers = headers if body: self.req.body = body def getresponse(self): mapper = routes.Mapper() api = context.UnauthenticatedContextMiddleware(router.API(mapper)) res = self.req.get_response(api) # httplib.Response has a read() method...fake it out def fake_reader(): return res.body setattr(res, 'read', fake_reader) return res def fake_get_connection_type(client): """ Returns the proper connection type """ DEFAULT_REGISTRY_PORT = 9191 DEFAULT_API_PORT = 9292 if (client.port == DEFAULT_API_PORT and client.host == '0.0.0.0'): return FakeGlanceConnection elif (client.port == DEFAULT_REGISTRY_PORT and client.host == '0.0.0.0'): return FakeRegistryConnection def fake_image_iter(self): for i in self.source.app_iter: yield i def fake_sendable(self, body): force = getattr(self, 'stub_force_sendfile', None) if force is None: return self._stub_orig_sendable(body) else: if force: assert glance.common.client.SENDFILE_SUPPORTED return force stubs.Set(glance.common.client.BaseClient, 'get_connection_type', fake_get_connection_type) setattr(glance.common.client.BaseClient, '_stub_orig_sendable', glance.common.client.BaseClient._sendable) stubs.Set(glance.common.client.BaseClient, '_sendable', fake_sendable) stubs.Set(glance.common.client.ImageBodyIterator, '__iter__', fake_image_iter) def stub_out_registry_server(stubs, **kwargs): """ Mocks calls to 127.0.0.1 on 9191 for testing so that a real Glance Registry server does not need to be up and running """ def fake_get_connection_type(client): """ Returns the proper connection type """ DEFAULT_REGISTRY_PORT = 9191 if (client.port == DEFAULT_REGISTRY_PORT and client.host == '0.0.0.0'): return FakeRegistryConnection def fake_image_iter(self): for i in self.response.app_iter: yield i stubs.Set(glance.common.client.BaseClient, 'get_connection_type', fake_get_connection_type) stubs.Set(glance.common.client.ImageBodyIterator, '__iter__', fake_image_iter)
./CrossVul/dataset_final_sorted/CWE-264/py/bad_3772_1
crossvul-python_data_good_3698_3
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # # Copyright 2011, Piston Cloud Computing, Inc. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Utility methods to resize, repartition, and modify disk images. Includes injection of SSH PGP keys into authorized_keys file. """ import crypt import json import os import random import re import tempfile from nova import exception from nova import flags from nova import log as logging from nova.openstack.common import cfg from nova import utils from nova.virt.disk import guestfs from nova.virt.disk import loop from nova.virt.disk import nbd LOG = logging.getLogger(__name__) disk_opts = [ cfg.StrOpt('injected_network_template', default='$pybasedir/nova/virt/interfaces.template', help='Template file for injected network'), cfg.ListOpt('img_handlers', default=['loop', 'nbd', 'guestfs'], help='Order of methods used to mount disk images'), # NOTE(yamahata): ListOpt won't work because the command may include a # comma. For example: # # mkfs.ext3 -O dir_index,extent -E stride=8,stripe-width=16 # --label %(fs_label)s %(target)s # # list arguments are comma separated and there is no way to # escape such commas. # cfg.MultiStrOpt('virt_mkfs', default=[ 'default=mkfs.ext3 -L %(fs_label)s -F %(target)s', 'linux=mkfs.ext3 -L %(fs_label)s -F %(target)s', 'windows=mkfs.ntfs' ' --force --fast --label %(fs_label)s %(target)s', # NOTE(yamahata): vfat case #'windows=mkfs.vfat -n %(fs_label)s %(target)s', ], help='mkfs commands for ephemeral device. ' 'The format is <os_type>=<mkfs command>'), ] FLAGS = flags.FLAGS FLAGS.register_opts(disk_opts) _MKFS_COMMAND = {} _DEFAULT_MKFS_COMMAND = None for s in FLAGS.virt_mkfs: # NOTE(yamahata): mkfs command may includes '=' for its options. 
# So item.partition('=') doesn't work here os_type, mkfs_command = s.split('=', 1) if os_type: _MKFS_COMMAND[os_type] = mkfs_command if os_type == 'default': _DEFAULT_MKFS_COMMAND = mkfs_command _QEMU_VIRT_SIZE_REGEX = re.compile('^virtual size: (.*) \(([0-9]+) bytes\)', re.MULTILINE) def mkfs(os_type, fs_label, target): mkfs_command = (_MKFS_COMMAND.get(os_type, _DEFAULT_MKFS_COMMAND) or '') % locals() if mkfs_command: utils.execute(*mkfs_command.split()) def get_image_virtual_size(image): out, _err = utils.execute('qemu-img', 'info', image) m = _QEMU_VIRT_SIZE_REGEX.search(out) return int(m.group(2)) def extend(image, size): """Increase image to size""" # NOTE(MotoKen): check image virtual size before resize virt_size = get_image_virtual_size(image) if virt_size >= size: return utils.execute('qemu-img', 'resize', image, size) # NOTE(vish): attempts to resize filesystem utils.execute('e2fsck', '-fp', image, check_exit_code=False) utils.execute('resize2fs', image, check_exit_code=False) def bind(src, target, instance_name): """Bind device to a filesytem""" if src: utils.execute('touch', target, run_as_root=True) utils.execute('mount', '-o', 'bind', src, target, run_as_root=True) s = os.stat(src) cgroup_info = "b %s:%s rwm\n" % (os.major(s.st_rdev), os.minor(s.st_rdev)) cgroups_path = \ "/sys/fs/cgroup/devices/libvirt/lxc/%s/devices.allow" \ % instance_name utils.execute('tee', cgroups_path, process_input=cgroup_info, run_as_root=True) def unbind(target): if target: utils.execute('umount', target, run_as_root=True) class _DiskImage(object): """Provide operations on a disk image file.""" def __init__(self, image, partition=None, use_cow=False, mount_dir=None): # These passed to each mounter self.image = image self.partition = partition self.mount_dir = mount_dir # Internal self._mkdir = False self._mounter = None self._errors = [] # As a performance tweak, don't bother trying to # directly loopback mount a cow image. self.handlers = FLAGS.img_handlers[:] if use_cow and 'loop' in self.handlers: self.handlers.remove('loop') if not self.handlers: raise exception.Error(_('no capable image handler configured')) @property def errors(self): """Return the collated errors from all operations.""" return '\n--\n'.join([''] + self._errors) @staticmethod def _handler_class(mode): """Look up the appropriate class to use based on MODE.""" for cls in (loop.Mount, nbd.Mount, guestfs.Mount): if cls.mode == mode: return cls raise exception.Error(_("unknown disk image handler: %s") % mode) def mount(self): """Mount a disk image, using the object attributes. The first supported means provided by the mount classes is used. True, or False is returned and the 'errors' attribute contains any diagnostics. 
""" if self._mounter: raise exception.Error(_('image already mounted')) if not self.mount_dir: self.mount_dir = tempfile.mkdtemp() self._mkdir = True try: for h in self.handlers: mounter_cls = self._handler_class(h) mounter = mounter_cls(image=self.image, partition=self.partition, mount_dir=self.mount_dir) if mounter.do_mount(): self._mounter = mounter break else: LOG.debug(mounter.error) self._errors.append(mounter.error) finally: if not self._mounter: self.umount() # rmdir return bool(self._mounter) def umount(self): """Unmount a disk image from the file system.""" try: if self._mounter: self._mounter.do_umount() finally: if self._mkdir: os.rmdir(self.mount_dir) # Public module functions def inject_data(image, key=None, net=None, metadata=None, admin_password=None, partition=None, use_cow=False): """Injects a ssh key and optionally net data into a disk image. it will mount the image as a fully partitioned disk and attempt to inject into the specified partition number. If partition is not specified it mounts the image as a single partition. """ img = _DiskImage(image=image, partition=partition, use_cow=use_cow) if img.mount(): try: inject_data_into_fs(img.mount_dir, key, net, metadata, admin_password, utils.execute) finally: img.umount() else: raise exception.Error(img.errors) def inject_files(image, files, partition=None, use_cow=False): """Injects arbitrary files into a disk image""" img = _DiskImage(image=image, partition=partition, use_cow=use_cow) if img.mount(): try: for (path, contents) in files: _inject_file_into_fs(img.mount_dir, path, contents) finally: img.umount() else: raise exception.Error(img.errors) def setup_container(image, container_dir=None, use_cow=False): """Setup the LXC container. It will mount the loopback image to the container directory in order to create the root filesystem for the container. LXC does not support qcow2 images yet. """ try: img = _DiskImage(image=image, use_cow=use_cow, mount_dir=container_dir) if img.mount(): return img else: raise exception.Error(img.errors) except Exception, exn: LOG.exception(_('Failed to mount filesystem: %s'), exn) def destroy_container(img): """Destroy the container once it terminates. It will umount the container that is mounted, and delete any linked devices. LXC does not support qcow2 images yet. """ try: if img: img.umount() except Exception, exn: LOG.exception(_('Failed to remove container: %s'), exn) def inject_data_into_fs(fs, key, net, metadata, admin_password, execute): """Injects data into a filesystem already mounted by the caller. Virt connections can call this directly if they mount their fs in a different way to inject_data """ if key: _inject_key_into_fs(key, fs, execute=execute) if net: _inject_net_into_fs(net, fs, execute=execute) if metadata: _inject_metadata_into_fs(metadata, fs, execute=execute) if admin_password: _inject_admin_password_into_fs(admin_password, fs, execute=execute) def _join_and_check_path_within_fs(fs, *args): '''os.path.join() with safety check for injected file paths. Join the supplied path components and make sure that the resulting path we are injecting into is within the mounted guest fs. Trying to be clever and specifying a path with '..' in it will hit this safeguard. 
''' absolute_path, _err = utils.execute('readlink', '-nm', os.path.join(fs, *args), run_as_root=True) if not absolute_path.startswith(os.path.realpath(fs) + '/'): raise exception.Invalid(_('injected file path not valid')) return absolute_path def _inject_file_into_fs(fs, path, contents, append=False): absolute_path = _join_and_check_path_within_fs(fs, path.lstrip('/')) parent_dir = os.path.dirname(absolute_path) utils.execute('mkdir', '-p', parent_dir, run_as_root=True) args = [] if append: args.append('-a') args.append(absolute_path) kwargs = dict(process_input=contents, run_as_root=True) utils.execute('tee', *args, **kwargs) def _inject_metadata_into_fs(metadata, fs, execute=None): metadata = dict([(m.key, m.value) for m in metadata]) _inject_file_into_fs(fs, 'meta.js', json.dumps(metadata)) def _inject_key_into_fs(key, fs, execute=None): """Add the given public ssh key to root's authorized_keys. key is an ssh key string. fs is the path to the base of the filesystem into which to inject the key. """ sshdir = _join_and_check_path_within_fs(fs, 'root', '.ssh') utils.execute('mkdir', '-p', sshdir, run_as_root=True) utils.execute('chown', 'root', sshdir, run_as_root=True) utils.execute('chmod', '700', sshdir, run_as_root=True) keyfile = os.path.join('root', '.ssh', 'authorized_keys') key_data = ''.join([ '\n', '# The following ssh key was injected by Nova', '\n', key.strip(), '\n', ]) _inject_file_into_fs(fs, keyfile, key_data, append=True) def _inject_net_into_fs(net, fs, execute=None): """Inject /etc/network/interfaces into the filesystem rooted at fs. net is the contents of /etc/network/interfaces. """ netdir = _join_and_check_path_within_fs(fs, 'etc', 'network') utils.execute('mkdir', '-p', netdir, run_as_root=True) utils.execute('chown', 'root:root', netdir, run_as_root=True) utils.execute('chmod', 755, netdir, run_as_root=True) netfile = os.path.join('etc', 'network', 'interfaces') _inject_file_into_fs(fs, netfile, net) def _inject_admin_password_into_fs(admin_passwd, fs, execute=None): """Set the root password to admin_passwd admin_password is a root password fs is the path to the base of the filesystem into which to inject the key. This method modifies the instance filesystem directly, and does not require a guest agent running in the instance. """ # The approach used here is to copy the password and shadow # files from the instance filesystem to local files, make any # necessary changes, and then copy them back. admin_user = 'root' fd, tmp_passwd = tempfile.mkstemp() os.close(fd) fd, tmp_shadow = tempfile.mkstemp() os.close(fd) passwd_path = _join_and_check_path_within_fs(fs, 'etc', 'passwd') shadow_path = _join_and_check_path_within_fs(fs, 'etc', 'shadow') utils.execute('cp', passwd_path, tmp_passwd, run_as_root=True) utils.execute('cp', shadow_path, tmp_shadow, run_as_root=True) _set_passwd(admin_user, admin_passwd, tmp_passwd, tmp_shadow) utils.execute('cp', tmp_passwd, passwd_path, run_as_root=True) os.unlink(tmp_passwd) utils.execute('cp', tmp_shadow, shadow_path, run_as_root=True) os.unlink(tmp_shadow) def _set_passwd(username, admin_passwd, passwd_file, shadow_file): """set the password for username to admin_passwd The passwd_file is not modified. The shadow_file is updated. if the username is not found in both files, an exception is raised. 
    :param username: the username
    :param admin_passwd: the plaintext password to set
    :param passwd_file: path to the passwd file
    :param shadow_file: path to the shadow password file
    :returns: nothing
    :raises: exception.Error(), IOError()

    """
    salt_set = ('abcdefghijklmnopqrstuvwxyz'
                'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                '0123456789./')
    # encryption algo - id pairs for crypt()
    algos = {'SHA-512': '$6$', 'SHA-256': '$5$', 'MD5': '$1$', 'DES': ''}

    salt = ''.join([random.choice(salt_set) for _ in range(16)])

    # crypt() depends on the underlying libc, and may not support all
    # forms of hash. We try md5 first. If we get only 13 characters back,
    # then the underlying crypt() didn't understand the '$n$salt' magic,
    # so we fall back to DES.
    # md5 is the default because it's widely supported. Although the
    # local crypt() might support stronger SHA, the target instance
    # might not.
    encrypted_passwd = crypt.crypt(admin_passwd, algos['MD5'] + salt)
    if len(encrypted_passwd) == 13:
        encrypted_passwd = crypt.crypt(admin_passwd, algos['DES'] + salt)

    try:
        p_file = open(passwd_file, 'rb')
        s_file = open(shadow_file, 'rb')

        # username MUST exist in passwd file or it's an error
        found = False
        for entry in p_file:
            split_entry = entry.split(':')
            if split_entry[0] == username:
                found = True
                break
        if not found:
            msg = _('User %(username)s not found in password file.')
            raise exception.Error(msg % {'username': username})

        # update password in the shadow file. It's an error if the
        # user doesn't exist.
        new_shadow = list()
        found = False
        for entry in s_file:
            split_entry = entry.split(':')
            if split_entry[0] == username:
                split_entry[1] = encrypted_passwd
                found = True
            new_entry = ':'.join(split_entry)
            new_shadow.append(new_entry)
        s_file.close()
        if not found:
            msg = _('User %(username)s not found in shadow file.')
            raise exception.Error(msg % {'username': username})
        s_file = open(shadow_file, 'wb')
        for entry in new_shadow:
            s_file.write(entry)
    finally:
        p_file.close()
        s_file.close()
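
# --- Illustrative example (not part of the original module) ---
# A minimal sketch of the containment rule that
# _join_and_check_path_within_fs() enforces: resolve the joined path and
# require that it stays under the mounted fs root. The real function
# shells out to 'readlink -nm' so symlinks and not-yet-existing paths are
# normalized with root privileges; the os.path normalization below is a
# simplified stand-in for demonstration only.

def _example_path_is_within_fs(fs, *parts):
    # resolve the mount point, then normalize the candidate path so any
    # '..' components are collapsed before the prefix check
    root = os.path.realpath(fs)
    candidate = os.path.normpath(os.path.join(root, *parts))
    return candidate.startswith(root + '/')

# _example_path_is_within_fs('/mnt/guest', 'etc', 'passwd')     -> True
# _example_path_is_within_fs('/mnt/guest', '..', 'etc/passwd')  -> False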
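
# --- Illustrative example (not part of the original module) ---
# A standalone sketch of the salted-crypt fallback used by _set_passwd()
# above: ask the local libc for an MD5 hash and, if it only returns the
# 13-character traditional format, retry with plain DES. crypt() is
# Unix-only and the schemes it accepts depend on the underlying libc.

def _example_hash_password(plaintext):
    salt_set = ('abcdefghijklmnopqrstuvwxyz'
                'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                '0123456789./')
    salt = ''.join([random.choice(salt_set) for _ in range(16)])
    # '$1$' requests MD5; a 13-character result means the libc ignored
    # the magic and fell back to DES, which uses only 2 salt characters
    hashed = crypt.crypt(plaintext, '$1$' + salt)
    if len(hashed) == 13:
        hashed = crypt.crypt(plaintext, salt)
    return hashed

# e.g. _example_hash_password('secret') -> '$1$<salt>$<digest>' on glibc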
./CrossVul/dataset_final_sorted/CWE-264/py/good_3698_3
crossvul-python_data_good_3692_1
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid import routes from keystone import catalog from keystone import exception from keystone import identity from keystone import policy from keystone import token from keystone.common import logging from keystone.common import utils from keystone.common import wsgi LOG = logging.getLogger(__name__) class AdminRouter(wsgi.ComposingRouter): def __init__(self): mapper = routes.Mapper() version_controller = VersionController('admin') mapper.connect('/', controller=version_controller, action='get_version') # Token Operations auth_controller = TokenController() mapper.connect('/tokens', controller=auth_controller, action='authenticate', conditions=dict(method=['POST'])) mapper.connect('/tokens/{token_id}', controller=auth_controller, action='validate_token', conditions=dict(method=['GET'])) mapper.connect('/tokens/{token_id}', controller=auth_controller, action='validate_token_head', conditions=dict(method=['HEAD'])) mapper.connect('/tokens/{token_id}', controller=auth_controller, action='delete_token', conditions=dict(method=['DELETE'])) mapper.connect('/tokens/{token_id}/endpoints', controller=auth_controller, action='endpoints', conditions=dict(method=['GET'])) # Miscellaneous Operations extensions_controller = AdminExtensionsController() mapper.connect('/extensions', controller=extensions_controller, action='get_extensions_info', conditions=dict(method=['GET'])) mapper.connect('/extensions/{extension_alias}', controller=extensions_controller, action='get_extension_info', conditions=dict(method=['GET'])) identity_router = identity.AdminRouter() routers = [identity_router] super(AdminRouter, self).__init__(mapper, routers) class PublicRouter(wsgi.ComposingRouter): def __init__(self): mapper = routes.Mapper() version_controller = VersionController('public') mapper.connect('/', controller=version_controller, action='get_version') # Token Operations auth_controller = TokenController() mapper.connect('/tokens', controller=auth_controller, action='authenticate', conditions=dict(method=['POST'])) # Miscellaneous extensions_controller = PublicExtensionsController() mapper.connect('/extensions', controller=extensions_controller, action='get_extensions_info', conditions=dict(method=['GET'])) mapper.connect('/extensions/{extension_alias}', controller=extensions_controller, action='get_extension_info', conditions=dict(method=['GET'])) identity_router = identity.PublicRouter() routers = [identity_router] super(PublicRouter, self).__init__(mapper, routers) class PublicVersionRouter(wsgi.ComposingRouter): def __init__(self): mapper = routes.Mapper() version_controller = VersionController('public') mapper.connect('/', controller=version_controller, action='get_versions') routers = [] super(PublicVersionRouter, self).__init__(mapper, routers) class AdminVersionRouter(wsgi.ComposingRouter): def __init__(self): mapper = routes.Mapper() version_controller = VersionController('admin') 
mapper.connect('/', controller=version_controller, action='get_versions') routers = [] super(AdminVersionRouter, self).__init__(mapper, routers) class VersionController(wsgi.Application): def __init__(self, version_type): self.catalog_api = catalog.Manager() self.url_key = "%sURL" % version_type super(VersionController, self).__init__() def _get_identity_url(self, context): catalog_ref = self.catalog_api.get_catalog( context=context, user_id=None, tenant_id=None) for region, region_ref in catalog_ref.iteritems(): for service, service_ref in region_ref.iteritems(): if service == 'identity': return service_ref[self.url_key] raise exception.NotImplemented() def _get_versions_list(self, context): """The list of versions is dependent on the context.""" identity_url = self._get_identity_url(context) if not identity_url.endswith('/'): identity_url = identity_url + '/' versions = {} versions['v2.0'] = { "id": "v2.0", "status": "beta", "updated": "2011-11-19T00:00:00Z", "links": [ { "rel": "self", "href": identity_url, }, { "rel": "describedby", "type": "text/html", "href": "http://docs.openstack.org/api/openstack-" "identity-service/2.0/content/" }, { "rel": "describedby", "type": "application/pdf", "href": "http://docs.openstack.org/api/openstack-" "identity-service/2.0/identity-dev-guide-" "2.0.pdf" } ], "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.identity-v2.0" "+json" }, { "base": "application/xml", "type": "application/vnd.openstack.identity-v2.0" "+xml" } ] } return versions def get_versions(self, context): versions = self._get_versions_list(context) return wsgi.render_response(status=(300, 'Multiple Choices'), body={ "versions": { "values": versions.values() } }) def get_version(self, context): versions = self._get_versions_list(context) return wsgi.render_response(body={ "version": versions['v2.0'] }) class NoopController(wsgi.Application): def __init__(self): super(NoopController, self).__init__() def noop(self, context): return {} class TokenController(wsgi.Application): def __init__(self): self.catalog_api = catalog.Manager() self.identity_api = identity.Manager() self.token_api = token.Manager() self.policy_api = policy.Manager() super(TokenController, self).__init__() def authenticate(self, context, auth=None): """Authenticate credentials and return a token. Accept auth as a dict that looks like:: { "auth":{ "passwordCredentials":{ "username":"test_user", "password":"mypass" }, "tenantName":"customer-x" } } In this case, tenant is optional, if not provided the token will be considered "unscoped" and can later be used to get a scoped token. Alternatively, this call accepts auth with only a token and tenant that will return a token that is scoped to that tenant. 
""" token_id = uuid.uuid4().hex if 'passwordCredentials' in auth: username = auth['passwordCredentials'].get('username', '') password = auth['passwordCredentials'].get('password', '') tenant_name = auth.get('tenantName', None) user_id = auth['passwordCredentials'].get('userId', None) if username: user_ref = self.identity_api.get_user_by_name( context=context, user_name=username) if user_ref: user_id = user_ref['id'] # more compat tenant_id = auth.get('tenantId', None) if tenant_name: tenant_ref = self.identity_api.get_tenant_by_name( context=context, tenant_name=tenant_name) if tenant_ref: tenant_id = tenant_ref['id'] try: auth_info = self.identity_api.authenticate(context=context, user_id=user_id, password=password, tenant_id=tenant_id) (user_ref, tenant_ref, metadata_ref) = auth_info # If the user is disabled don't allow them to authenticate if not user_ref.get('enabled', True): LOG.warning('User %s is disabled' % user_id) raise exception.Unauthorized() except AssertionError as e: raise exception.Unauthorized(e.message) token_ref = self.token_api.create_token( context, token_id, dict(id=token_id, user=user_ref, tenant=tenant_ref, metadata=metadata_ref)) if tenant_ref: catalog_ref = self.catalog_api.get_catalog( context=context, user_id=user_ref['id'], tenant_id=tenant_ref['id'], metadata=metadata_ref) else: catalog_ref = {} elif 'token' in auth: token = auth['token'].get('id', None) tenant_name = auth.get('tenantName') # more compat if tenant_name: tenant_ref = self.identity_api.get_tenant_by_name( context=context, tenant_name=tenant_name) tenant_id = tenant_ref['id'] else: tenant_id = auth.get('tenantId', None) try: old_token_ref = self.token_api.get_token(context=context, token_id=token) except exception.NotFound: raise exception.Unauthorized() user_ref = old_token_ref['user'] # If the user is disabled don't allow them to authenticate current_user_ref = self.identity_api.get_user( context=context, user_id=user_ref['id']) if not current_user_ref.get('enabled', True): LOG.warning('User %s is disabled' % user_ref['id']) raise exception.Unauthorized() tenants = self.identity_api.get_tenants_for_user(context, user_ref['id']) if tenant_id: assert tenant_id in tenants tenant_ref = self.identity_api.get_tenant(context=context, tenant_id=tenant_id) if tenant_ref: metadata_ref = self.identity_api.get_metadata( context=context, user_id=user_ref['id'], tenant_id=tenant_ref['id']) catalog_ref = self.catalog_api.get_catalog( context=context, user_id=user_ref['id'], tenant_id=tenant_ref['id'], metadata=metadata_ref) else: metadata_ref = {} catalog_ref = {} token_ref = self.token_api.create_token( context, token_id, dict(id=token_id, user=user_ref, tenant=tenant_ref, metadata=metadata_ref)) # TODO(termie): optimize this call at some point and put it into the # the return for metadata # fill out the roles in the metadata roles_ref = [] for role_id in metadata_ref.get('roles', []): roles_ref.append(self.identity_api.get_role(context, role_id)) logging.debug('TOKEN_REF %s', token_ref) return self._format_authenticate(token_ref, roles_ref, catalog_ref) def _get_token_ref(self, context, token_id, belongs_to=None): """Returns a token if a valid one exists. Optionally, limited to a token owned by a specific tenant. 
""" # TODO(termie): this stuff should probably be moved to middleware self.assert_admin(context) token_ref = self.token_api.get_token(context=context, token_id=token_id) if belongs_to: assert token_ref['tenant']['id'] == belongs_to return token_ref # admin only def validate_token_head(self, context, token_id): """Check that a token is valid. Optionally, also ensure that it is owned by a specific tenant. Identical to ``validate_token``, except does not return a response. """ belongs_to = context['query_string'].get("belongsTo") assert self._get_token_ref(context, token_id, belongs_to) # admin only def validate_token(self, context, token_id): """Check that a token is valid. Optionally, also ensure that it is owned by a specific tenant. Returns metadata about the token along any associated roles. """ belongs_to = context['query_string'].get("belongsTo") token_ref = self._get_token_ref(context, token_id, belongs_to) # TODO(termie): optimize this call at some point and put it into the # the return for metadata # fill out the roles in the metadata metadata_ref = token_ref['metadata'] roles_ref = [] for role_id in metadata_ref.get('roles', []): roles_ref.append(self.identity_api.get_role(context, role_id)) # Get a service catalog if belongs_to is not none # This is needed for on-behalf-of requests catalog_ref = None if belongs_to is not None: catalog_ref = self.catalog_api.get_catalog( context=context, user_id=token_ref['user']['id'], tenant_id=token_ref['tenant']['id'], metadata=metadata_ref) return self._format_token(token_ref, roles_ref, catalog_ref) def delete_token(self, context, token_id): """Delete a token, effectively invalidating it for authz.""" # TODO(termie): this stuff should probably be moved to middleware self.assert_admin(context) self.token_api.delete_token(context=context, token_id=token_id) def endpoints(self, context, token_id): """Return a list of endpoints available to the token.""" raise exception.NotImplemented() def _format_authenticate(self, token_ref, roles_ref, catalog_ref): o = self._format_token(token_ref, roles_ref) o['access']['serviceCatalog'] = self._format_catalog(catalog_ref) return o def _format_token(self, token_ref, roles_ref, catalog_ref=None): user_ref = token_ref['user'] metadata_ref = token_ref['metadata'] expires = token_ref['expires'] if expires is not None: expires = utils.isotime(expires) o = {'access': {'token': {'id': token_ref['id'], 'expires': expires, }, 'user': {'id': user_ref['id'], 'name': user_ref['name'], 'username': user_ref['name'], 'roles': roles_ref, 'roles_links': metadata_ref.get('roles_links', []) } } } if 'tenant' in token_ref and token_ref['tenant']: token_ref['tenant']['enabled'] = True o['access']['token']['tenant'] = token_ref['tenant'] if catalog_ref is not None: o['access']['serviceCatalog'] = self._format_catalog(catalog_ref) return o def _format_catalog(self, catalog_ref): """Munge catalogs from internal to output format Internal catalogs look like: {$REGION: { {$SERVICE: { $key1: $value1, ... } } } The legacy api wants them to look like [{'name': $SERVICE[name], 'type': $SERVICE, 'endpoints': [{ 'tenantId': $tenant_id, ... 
'region': $REGION, }], 'endpoints_links': [], }] """ if not catalog_ref: return {} services = {} for region, region_ref in catalog_ref.iteritems(): for service, service_ref in region_ref.iteritems(): new_service_ref = services.get(service, {}) new_service_ref['name'] = service_ref.pop('name') new_service_ref['type'] = service new_service_ref['endpoints_links'] = [] service_ref['region'] = region endpoints_ref = new_service_ref.get('endpoints', []) endpoints_ref.append(service_ref) new_service_ref['endpoints'] = endpoints_ref services[service] = new_service_ref return services.values() class ExtensionsController(wsgi.Application): """Base extensions controller to be extended by public and admin API's.""" def __init__(self, extensions=None): super(ExtensionsController, self).__init__() self.extensions = extensions or {} def get_extensions_info(self, context): return {'extensions': {'values': self.extensions.values()}} def get_extension_info(self, context, extension_alias): try: return {'extension': self.extensions[extension_alias]} except KeyError: raise exception.NotFound(target=extension_alias) class PublicExtensionsController(ExtensionsController): pass class AdminExtensionsController(ExtensionsController): def __init__(self, *args, **kwargs): super(AdminExtensionsController, self).__init__(*args, **kwargs) # TODO(dolph): Extensions should obviously provide this information # themselves, but hardcoding it here allows us to match # the API spec in the short term with minimal complexity. self.extensions['OS-KSADM'] = { 'name': 'Openstack Keystone Admin', 'namespace': 'http://docs.openstack.org/identity/api/ext/' 'OS-KSADM/v1.0', 'alias': 'OS-KSADM', 'updated': '2011-08-19T13:25:27-06:00', 'description': 'Openstack extensions to Keystone v2.0 API ' 'enabling Admin Operations.', 'links': [ { 'rel': 'describedby', # TODO(dolph): link needs to be revised after # bug 928059 merges 'type': 'text/html', 'href': ('https://github.com/openstack/' 'identity-api'), } ] } @logging.fail_gracefully def public_app_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) return PublicRouter() @logging.fail_gracefully def admin_app_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) return AdminRouter() @logging.fail_gracefully def public_version_app_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) return PublicVersionRouter() @logging.fail_gracefully def admin_version_app_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) return AdminVersionRouter()
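
# --- Illustrative example (not part of the original file) ---
# A minimal sketch of how these paste.deploy-style factories can be
# exercised directly: each one merges global_conf with the local overrides
# and returns a plain WSGI application, so any WSGI server can host it.
# This assumes keystone's backends are already configured; wsgiref and the
# port below are illustrative choices, not keystone's own deployment.

def _example_serve_public_api():
    from wsgiref import simple_server
    # Call the factory the way paste.deploy would: a global config dict
    # plus keyword arguments from the local [app:...] section.
    app = public_app_factory({}, **{})
    server = simple_server.make_server('127.0.0.1', 5000, app)
    server.serve_forever()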
./CrossVul/dataset_final_sorted/CWE-264/py/good_3692_1
crossvul-python_data_bad_3724_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Main entry point into the Identity service.""" import uuid import urllib import urlparse from keystone import config from keystone import exception from keystone import policy from keystone import token from keystone.common import logging from keystone.common import manager from keystone.common import wsgi CONF = config.CONF LOG = logging.getLogger(__name__) class Manager(manager.Manager): """Default pivot point for the Identity backend. See :mod:`keystone.common.manager.Manager` for more details on how this dynamically calls the backend. """ def __init__(self): super(Manager, self).__init__(CONF.identity.driver) class Driver(object): """Interface description for an Identity driver.""" def authenticate(self, user_id=None, tenant_id=None, password=None): """Authenticate a given user, tenant and password. Returns: (user, tenant, metadata). """ raise exception.NotImplemented() def get_tenant(self, tenant_id): """Get a tenant by id. Returns: tenant_ref or None. """ raise exception.NotImplemented() def get_tenant_by_name(self, tenant_name): """Get a tenant by name. Returns: tenant_ref or None. """ raise exception.NotImplemented() def get_user(self, user_id): """Get a user by id. Returns: user_ref or None. """ raise exception.NotImplemented() def get_user_by_name(self, user_name): """Get a user by name. Returns: user_ref or None. """ raise exception.NotImplemented() def get_role(self, role_id): """Get a role by id. Returns: role_ref or None. """ raise exception.NotImplemented() def list_users(self): """List all users in the system. NOTE(termie): I'd prefer if this listed only the users for a given tenant. Returns: a list of user_refs or an empty list. """ raise exception.NotImplemented() def list_roles(self): """List all roles in the system. Returns: a list of role_refs or an empty list. """ raise exception.NotImplemented() # NOTE(termie): seven calls below should probably be exposed by the api # more clearly when the api redesign happens def add_user_to_tenant(self, tenant_id, user_id): raise exception.NotImplemented() def remove_user_from_tenant(self, tenant_id, user_id): raise exception.NotImplemented() def get_all_tenants(self): raise exception.NotImplemented() def get_tenants_for_user(self, user_id): """Get the tenants associated with a given user. Returns: a list of tenant ids. """ raise exception.NotImplemented() def get_roles_for_user_and_tenant(self, user_id, tenant_id): """Get the roles associated with a user within given tenant. Returns: a list of role ids. 
""" raise exception.NotImplemented() def add_role_to_user_and_tenant(self, user_id, tenant_id, role_id): """Add a role to a user within given tenant.""" raise exception.NotImplemented() def remove_role_from_user_and_tenant(self, user_id, tenant_id, role_id): """Remove a role from a user within given tenant.""" raise exception.NotImplemented() # user crud def create_user(self, user_id, user): raise exception.NotImplemented() def update_user(self, user_id, user): raise exception.NotImplemented() def delete_user(self, user_id): raise exception.NotImplemented() # tenant crud def create_tenant(self, tenant_id, tenant): raise exception.NotImplemented() def update_tenant(self, tenant_id, tenant): raise exception.NotImplemented() def delete_tenant(self, tenant_id, tenant): raise exception.NotImplemented() # metadata crud def get_metadata(self, user_id, tenant_id): raise exception.NotImplemented() def create_metadata(self, user_id, tenant_id, metadata): raise exception.NotImplemented() def update_metadata(self, user_id, tenant_id, metadata): raise exception.NotImplemented() def delete_metadata(self, user_id, tenant_id, metadata): raise exception.NotImplemented() # role crud def create_role(self, role_id, role): raise exception.NotImplemented() def update_role(self, role_id, role): raise exception.NotImplemented() def delete_role(self, role_id): raise exception.NotImplemented() class PublicRouter(wsgi.ComposableRouter): def add_routes(self, mapper): tenant_controller = TenantController() mapper.connect('/tenants', controller=tenant_controller, action='get_tenants_for_token', conditions=dict(methods=['GET'])) class AdminRouter(wsgi.ComposableRouter): def add_routes(self, mapper): # Tenant Operations tenant_controller = TenantController() mapper.connect('/tenants', controller=tenant_controller, action='get_all_tenants', conditions=dict(method=['GET'])) mapper.connect('/tenants/{tenant_id}', controller=tenant_controller, action='get_tenant', conditions=dict(method=['GET'])) # User Operations user_controller = UserController() mapper.connect('/users/{user_id}', controller=user_controller, action='get_user', conditions=dict(method=['GET'])) # Role Operations roles_controller = RoleController() mapper.connect('/tenants/{tenant_id}/users/{user_id}/roles', controller=roles_controller, action='get_user_roles', conditions=dict(method=['GET'])) mapper.connect('/users/{user_id}/roles', controller=user_controller, action='get_user_roles', conditions=dict(method=['GET'])) class TenantController(wsgi.Application): def __init__(self): self.identity_api = Manager() self.policy_api = policy.Manager() self.token_api = token.Manager() super(TenantController, self).__init__() def get_all_tenants(self, context, **kw): """Gets a list of all tenants for an admin user.""" self.assert_admin(context) tenant_refs = self.identity_api.get_tenants(context) params = { 'limit': context['query_string'].get('limit'), 'marker': context['query_string'].get('marker'), } return self._format_tenant_list(tenant_refs, **params) def get_tenants_for_token(self, context, **kw): """Get valid tenants for token based on token used to authenticate. Pulls the token from the context, validates it and gets the valid tenants for the user in the token. Doesn't care about token scopedness. 
""" try: token_ref = self.token_api.get_token(context=context, token_id=context['token_id']) except exception.NotFound: raise exception.Unauthorized() user_ref = token_ref['user'] tenant_ids = self.identity_api.get_tenants_for_user( context, user_ref['id']) tenant_refs = [] for tenant_id in tenant_ids: tenant_refs.append(self.identity_api.get_tenant( context=context, tenant_id=tenant_id)) params = { 'limit': context['query_string'].get('limit'), 'marker': context['query_string'].get('marker'), } return self._format_tenant_list(tenant_refs, **params) def get_tenant(self, context, tenant_id): # TODO(termie): this stuff should probably be moved to middleware self.assert_admin(context) tenant = self.identity_api.get_tenant(context, tenant_id) if tenant is None: raise exception.TenantNotFound(tenant_id=tenant_id) return {'tenant': tenant} # CRUD Extension def create_tenant(self, context, tenant): tenant_ref = self._normalize_dict(tenant) self.assert_admin(context) tenant_id = (tenant_ref.get('id') and tenant_ref.get('id') or uuid.uuid4().hex) tenant_ref['id'] = tenant_id tenant = self.identity_api.create_tenant( context, tenant_id, tenant_ref) return {'tenant': tenant} def update_tenant(self, context, tenant_id, tenant): self.assert_admin(context) if self.identity_api.get_tenant(context, tenant_id) is None: raise exception.TenantNotFound(tenant_id=tenant_id) tenant_ref = self.identity_api.update_tenant( context, tenant_id, tenant) return {'tenant': tenant_ref} def delete_tenant(self, context, tenant_id, **kw): self.assert_admin(context) if self.identity_api.get_tenant(context, tenant_id) is None: raise exception.TenantNotFound(tenant_id=tenant_id) self.identity_api.delete_tenant(context, tenant_id) def get_tenant_users(self, context, tenant_id, **kw): self.assert_admin(context) if self.identity_api.get_tenant(context, tenant_id) is None: raise exception.TenantNotFound(tenant_id=tenant_id) user_refs = self.identity_api.get_tenant_users(context, tenant_id) return {'users': user_refs} def _format_tenant_list(self, tenant_refs, **kwargs): marker = kwargs.get('marker') page_idx = 0 if marker is not None: for (marker_idx, tenant) in enumerate(tenant_refs): if tenant['id'] == marker: # we start pagination after the marker page_idx = marker_idx + 1 break else: msg = 'Marker could not be found' raise exception.ValidationError(message=msg) limit = kwargs.get('limit') if limit is not None: try: limit = int(limit) if limit < 0: raise AssertionError() except (ValueError, AssertionError): msg = 'Invalid limit value' raise exception.ValidationError(message=msg) tenant_refs = tenant_refs[page_idx:limit] for x in tenant_refs: if 'enabled' not in x: x['enabled'] = True o = {'tenants': tenant_refs, 'tenants_links': []} return o class UserController(wsgi.Application): def __init__(self): self.identity_api = Manager() self.policy_api = policy.Manager() self.token_api = token.Manager() super(UserController, self).__init__() def get_user(self, context, user_id): self.assert_admin(context) user_ref = self.identity_api.get_user(context, user_id) if not user_ref: raise exception.UserNotFound(user_id=user_id) return {'user': user_ref} def get_users(self, context): # NOTE(termie): i can't imagine that this really wants all the data # about every single user in the system... 
        self.assert_admin(context)
        user_refs = self.identity_api.list_users(context)
        return {'users': user_refs}

    # CRUD extension
    def create_user(self, context, user):
        user = self._normalize_dict(user)
        self.assert_admin(context)
        tenant_id = user.get('tenantId', None)
        if (tenant_id is not None
                and self.identity_api.get_tenant(context, tenant_id) is None):
            raise exception.TenantNotFound(tenant_id=tenant_id)
        user_id = uuid.uuid4().hex
        user_ref = user.copy()
        user_ref['id'] = user_id
        new_user_ref = self.identity_api.create_user(
                context, user_id, user_ref)
        if tenant_id:
            self.identity_api.add_user_to_tenant(context, tenant_id, user_id)
        return {'user': new_user_ref}

    def update_user(self, context, user_id, user):
        # NOTE(termie): this is really more of a patch than a put
        self.assert_admin(context)
        if self.identity_api.get_user(context, user_id) is None:
            raise exception.UserNotFound(user_id=user_id)

        user_ref = self.identity_api.update_user(context, user_id, user)

        # If the password was changed or the user was disabled we clear tokens
        if user.get('password') or not user.get('enabled', True):
            try:
                for token_id in self.token_api.list_tokens(context, user_id):
                    self.token_api.delete_token(context, token_id)
            except exception.NotImplemented:
                # The user's status has been changed, but tokens remain
                # valid for backends that can't list tokens for users
                LOG.warning('User %s status has changed, but existing tokens '
                            'remain valid' % user_id)
        return {'user': user_ref}

    def delete_user(self, context, user_id):
        self.assert_admin(context)
        if self.identity_api.get_user(context, user_id) is None:
            raise exception.UserNotFound(user_id=user_id)
        self.identity_api.delete_user(context, user_id)

    def set_user_enabled(self, context, user_id, user):
        return self.update_user(context, user_id, user)

    def set_user_password(self, context, user_id, user):
        return self.update_user(context, user_id, user)

    def update_user_tenant(self, context, user_id, user):
        """Update the default tenant."""
        # ensure that we're a member of that tenant
        tenant_id = user.get('tenantId')
        self.identity_api.add_user_to_tenant(context, tenant_id, user_id)
        return self.update_user(context, user_id, user)


class RoleController(wsgi.Application):
    def __init__(self):
        self.identity_api = Manager()
        self.token_api = token.Manager()
        self.policy_api = policy.Manager()
        super(RoleController, self).__init__()

    # COMPAT(essex-3)
    def get_user_roles(self, context, user_id, tenant_id=None):
        """Get the roles for a user and tenant pair.

        Since we're trying to ignore the idea of user-only roles we're
        not implementing them in hopes that the idea will die off.
""" self.assert_admin(context) if tenant_id is None: raise exception.NotImplemented(message='User roles not supported: ' 'tenant ID required') user = self.identity_api.get_user(context, user_id) if user is None: raise exception.UserNotFound(user_id=user_id) tenant = self.identity_api.get_tenant(context, tenant_id) if tenant is None: raise exception.TenantNotFound(tenant_id=tenant_id) roles = self.identity_api.get_roles_for_user_and_tenant( context, user_id, tenant_id) return {'roles': [self.identity_api.get_role(context, x) for x in roles]} # CRUD extension def get_role(self, context, role_id): self.assert_admin(context) role_ref = self.identity_api.get_role(context, role_id) if not role_ref: raise exception.RoleNotFound(role_id=role_id) return {'role': role_ref} def create_role(self, context, role): role = self._normalize_dict(role) self.assert_admin(context) role_id = uuid.uuid4().hex role['id'] = role_id role_ref = self.identity_api.create_role(context, role_id, role) return {'role': role_ref} def delete_role(self, context, role_id): self.assert_admin(context) self.get_role(context, role_id) self.identity_api.delete_role(context, role_id) def get_roles(self, context): self.assert_admin(context) roles = self.identity_api.list_roles(context) # TODO(termie): probably inefficient at some point return {'roles': roles} def add_role_to_user(self, context, user_id, role_id, tenant_id=None): """Add a role to a user and tenant pair. Since we're trying to ignore the idea of user-only roles we're not implementing them in hopes that the idea will die off. """ self.assert_admin(context) if tenant_id is None: raise exception.NotImplemented(message='User roles not supported: ' 'tenant_id required') if self.identity_api.get_user(context, user_id) is None: raise exception.UserNotFound(user_id=user_id) if self.identity_api.get_tenant(context, tenant_id) is None: raise exception.TenantNotFound(tenant_id=tenant_id) if self.identity_api.get_role(context, role_id) is None: raise exception.RoleNotFound(role_id=role_id) # This still has the weird legacy semantics that adding a role to # a user also adds them to a tenant self.identity_api.add_user_to_tenant(context, tenant_id, user_id) self.identity_api.add_role_to_user_and_tenant( context, user_id, tenant_id, role_id) role_ref = self.identity_api.get_role(context, role_id) return {'role': role_ref} def remove_role_from_user(self, context, user_id, role_id, tenant_id=None): """Remove a role from a user and tenant pair. Since we're trying to ignore the idea of user-only roles we're not implementing them in hopes that the idea will die off. 
""" self.assert_admin(context) if tenant_id is None: raise exception.NotImplemented(message='User roles not supported: ' 'tenant_id required') if self.identity_api.get_user(context, user_id) is None: raise exception.UserNotFound(user_id=user_id) if self.identity_api.get_tenant(context, tenant_id) is None: raise exception.TenantNotFound(tenant_id=tenant_id) if self.identity_api.get_role(context, role_id) is None: raise exception.RoleNotFound(role_id=role_id) # This still has the weird legacy semantics that adding a role to # a user also adds them to a tenant, so we must follow up on that self.identity_api.remove_role_from_user_and_tenant( context, user_id, tenant_id, role_id) roles = self.identity_api.get_roles_for_user_and_tenant( context, user_id, tenant_id) if not roles: self.identity_api.remove_user_from_tenant( context, tenant_id, user_id) return # COMPAT(diablo): CRUD extension def get_role_refs(self, context, user_id): """Ultimate hack to get around having to make role_refs first-class. This will basically iterate over the various roles the user has in all tenants the user is a member of and create fake role_refs where the id encodes the user-tenant-role information so we can look up the appropriate data when we need to delete them. """ self.assert_admin(context) user_ref = self.identity_api.get_user(context, user_id) tenant_ids = self.identity_api.get_tenants_for_user(context, user_id) o = [] for tenant_id in tenant_ids: role_ids = self.identity_api.get_roles_for_user_and_tenant( context, user_id, tenant_id) for role_id in role_ids: ref = {'roleId': role_id, 'tenantId': tenant_id, 'userId': user_id} ref['id'] = urllib.urlencode(ref) o.append(ref) return {'roles': o} # COMPAT(diablo): CRUD extension def create_role_ref(self, context, user_id, role): """This is actually used for adding a user to a tenant. In the legacy data model adding a user to a tenant required setting a role. """ self.assert_admin(context) # TODO(termie): for now we're ignoring the actual role tenant_id = role.get('tenantId') role_id = role.get('roleId') self.identity_api.add_user_to_tenant(context, tenant_id, user_id) self.identity_api.add_role_to_user_and_tenant( context, user_id, tenant_id, role_id) role_ref = self.identity_api.get_role(context, role_id) return {'role': role_ref} # COMPAT(diablo): CRUD extension def delete_role_ref(self, context, user_id, role_ref_id): """This is actually used for deleting a user from a tenant. In the legacy data model removing a user from a tenant required deleting a role. To emulate this, we encode the tenant and role in the role_ref_id, and if this happens to be the last role for the user-tenant pair, we remove the user from the tenant. """ self.assert_admin(context) # TODO(termie): for now we're ignoring the actual role role_ref_ref = urlparse.parse_qs(role_ref_id) tenant_id = role_ref_ref.get('tenantId')[0] role_id = role_ref_ref.get('roleId')[0] self.identity_api.remove_role_from_user_and_tenant( context, user_id, tenant_id, role_id) roles = self.identity_api.get_roles_for_user_and_tenant( context, user_id, tenant_id) if not roles: self.identity_api.remove_user_from_tenant( context, tenant_id, user_id)
./CrossVul/dataset_final_sorted/CWE-264/py/bad_3724_0
crossvul-python_data_bad_3632_2
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Defines interface for DB access. The underlying driver is loaded as a :class:`LazyPluggable`. Functions in this module are imported into the nova.db namespace. Call these functions from nova.db namespace, not the nova.db.api namespace. All functions in this module return objects that implement a dictionary-like interface. Currently, many of these objects are sqlalchemy objects that implement a dictionary interface. However, a future goal is to have all of these objects be simple dictionaries. **Related Flags** :db_backend: string to lookup in the list of LazyPluggable backends. `sqlalchemy` is the only supported backend right now. :sql_connection: string specifying the sqlalchemy connection to use, like: `sqlite:///var/lib/nova/nova.sqlite`. :enable_new_services: when adding a new service to the database, is it in the pool of available hardware (Default: True) """ from nova import exception from nova import flags from nova.openstack.common import cfg from nova import utils db_opts = [ cfg.StrOpt('db_backend', default='sqlalchemy', help='The backend to use for db'), cfg.BoolOpt('enable_new_services', default=True, help='Services to be added to the available pool on create'), cfg.StrOpt('instance_name_template', default='instance-%08x', help='Template string to be used to generate instance names'), cfg.StrOpt('volume_name_template', default='volume-%08x', help='Template string to be used to generate instance names'), cfg.StrOpt('snapshot_name_template', default='snapshot-%08x', help='Template string to be used to generate snapshot names'), ] FLAGS = flags.FLAGS FLAGS.register_opts(db_opts) IMPL = utils.LazyPluggable('db_backend', sqlalchemy='nova.db.sqlalchemy.api') class NoMoreNetworks(exception.Error): """No more available networks.""" pass class NoMoreTargets(exception.Error): """No more available targets""" pass ################### def service_destroy(context, instance_id): """Destroy the service or raise if it does not exist.""" return IMPL.service_destroy(context, instance_id) def service_get(context, service_id): """Get a service or raise if it does not exist.""" return IMPL.service_get(context, service_id) def service_get_by_host_and_topic(context, host, topic): """Get a service by host it's on and topic it listens to.""" return IMPL.service_get_by_host_and_topic(context, host, topic) def service_get_all(context, disabled=None): """Get all services.""" return IMPL.service_get_all(context, disabled) def service_get_all_by_topic(context, topic): """Get all services for a given topic.""" return IMPL.service_get_all_by_topic(context, topic) def service_get_all_by_host(context, host): """Get all services for a given host.""" return IMPL.service_get_all_by_host(context, host) def 
service_get_all_compute_by_host(context, host):
    """Get all compute services for a given host."""
    return IMPL.service_get_all_compute_by_host(context, host)


def service_get_all_compute_sorted(context):
    """Get all compute services sorted by instance count.

    :returns: a list of (Service, instance_count) tuples.

    """
    return IMPL.service_get_all_compute_sorted(context)


def service_get_all_volume_sorted(context):
    """Get all volume services sorted by volume count.

    :returns: a list of (Service, volume_count) tuples.

    """
    return IMPL.service_get_all_volume_sorted(context)


def service_get_by_args(context, host, binary):
    """Get the state of a service by node name and binary."""
    return IMPL.service_get_by_args(context, host, binary)


def service_create(context, values):
    """Create a service from the values dictionary."""
    return IMPL.service_create(context, values)


def service_update(context, service_id, values):
    """Set the given properties on a service and update it.

    Raises NotFound if service does not exist.

    """
    return IMPL.service_update(context, service_id, values)


###################


def compute_node_get(context, compute_id):
    """Get a compute node or raise if it does not exist."""
    return IMPL.compute_node_get(context, compute_id)


def compute_node_get_all(context):
    """Get all compute nodes."""
    return IMPL.compute_node_get_all(context)


def compute_node_create(context, values):
    """Create a compute node from the values dictionary."""
    return IMPL.compute_node_create(context, values)


def compute_node_update(context, compute_id, values, auto_adjust=True):
    """Set the given properties on a compute node and update it.

    Raises NotFound if the compute node does not exist.

    """
    return IMPL.compute_node_update(context, compute_id, values, auto_adjust)


def compute_node_get_by_host(context, host):
    return IMPL.compute_node_get_by_host(context, host)


def compute_node_utilization_update(context, host, free_ram_mb_delta=0,
                                    free_disk_gb_delta=0, work_delta=0,
                                    vm_delta=0):
    return IMPL.compute_node_utilization_update(context, host,
                                                free_ram_mb_delta,
                                                free_disk_gb_delta,
                                                work_delta, vm_delta)


def compute_node_utilization_set(context, host, free_ram_mb=None,
                                 free_disk_gb=None, work=None, vms=None):
    return IMPL.compute_node_utilization_set(context, host, free_ram_mb,
                                             free_disk_gb, work, vms)


###################


def certificate_create(context, values):
    """Create a certificate from the values dictionary."""
    return IMPL.certificate_create(context, values)


def certificate_get_all_by_project(context, project_id):
    """Get all certificates for a project."""
    return IMPL.certificate_get_all_by_project(context, project_id)


def certificate_get_all_by_user(context, user_id):
    """Get all certificates for a user."""
    return IMPL.certificate_get_all_by_user(context, user_id)


def certificate_get_all_by_user_and_project(context, user_id, project_id):
    """Get all certificates for a user and project."""
    return IMPL.certificate_get_all_by_user_and_project(context,
                                                        user_id,
                                                        project_id)


###################


def floating_ip_get(context, id):
    return IMPL.floating_ip_get(context, id)


def floating_ip_get_pools(context):
    """Returns a list of floating ip pools."""
    return IMPL.floating_ip_get_pools(context)


def floating_ip_allocate_address(context, project_id, pool):
    """Allocate a free floating ip from the specified pool and return
    the address.

    Raises if one is not available.
""" return IMPL.floating_ip_allocate_address(context, project_id, pool) def floating_ip_create(context, values): """Create a floating ip from the values dictionary.""" return IMPL.floating_ip_create(context, values) def floating_ip_count_by_project(context, project_id): """Count floating ips used by project.""" return IMPL.floating_ip_count_by_project(context, project_id) def floating_ip_deallocate(context, address): """Deallocate an floating ip by address.""" return IMPL.floating_ip_deallocate(context, address) def floating_ip_destroy(context, address): """Destroy the floating_ip or raise if it does not exist.""" return IMPL.floating_ip_destroy(context, address) def floating_ip_disassociate(context, address): """Disassociate an floating ip from a fixed ip by address. :returns: the address of the existing fixed ip. """ return IMPL.floating_ip_disassociate(context, address) def floating_ip_fixed_ip_associate(context, floating_address, fixed_address, host): """Associate an floating ip to a fixed_ip by address.""" return IMPL.floating_ip_fixed_ip_associate(context, floating_address, fixed_address, host) def floating_ip_get_all(context): """Get all floating ips.""" return IMPL.floating_ip_get_all(context) def floating_ip_get_all_by_host(context, host): """Get all floating ips by host.""" return IMPL.floating_ip_get_all_by_host(context, host) def floating_ip_get_all_by_project(context, project_id): """Get all floating ips by project.""" return IMPL.floating_ip_get_all_by_project(context, project_id) def floating_ip_get_by_address(context, address): """Get a floating ip by address or raise if it doesn't exist.""" return IMPL.floating_ip_get_by_address(context, address) def floating_ip_get_by_fixed_address(context, fixed_address): """Get a floating ips by fixed address""" return IMPL.floating_ip_get_by_fixed_address(context, fixed_address) def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id): """Get a floating ips by fixed address""" return IMPL.floating_ip_get_by_fixed_ip_id(context, fixed_ip_id) def floating_ip_update(context, address, values): """Update a floating ip by address or raise if it doesn't exist.""" return IMPL.floating_ip_update(context, address, values) def floating_ip_set_auto_assigned(context, address): """Set auto_assigned flag to floating ip""" return IMPL.floating_ip_set_auto_assigned(context, address) def dnsdomain_list(context): """Get a list of all zones in our database, public and private.""" return IMPL.dnsdomain_list(context) def dnsdomain_register_for_zone(context, fqdomain, zone): """Associated a DNS domain with an availability zone""" return IMPL.dnsdomain_register_for_zone(context, fqdomain, zone) def dnsdomain_register_for_project(context, fqdomain, project): """Associated a DNS domain with a project id""" return IMPL.dnsdomain_register_for_project(context, fqdomain, project) def dnsdomain_unregister(context, fqdomain): """Purge associations for the specified DNS zone""" return IMPL.dnsdomain_unregister(context, fqdomain) def dnsdomain_get(context, fqdomain): """Get the db record for the specified domain.""" return IMPL.dnsdomain_get(context, fqdomain) #################### def migration_update(context, id, values): """Update a migration instance.""" return IMPL.migration_update(context, id, values) def migration_create(context, values): """Create a migration record.""" return IMPL.migration_create(context, values) def migration_get(context, migration_id): """Finds a migration by the id.""" return IMPL.migration_get(context, migration_id) def 
def migration_get_by_instance_and_status(context, instance_uuid, status):
    """Finds a migration by the instance uuid it is migrating."""
    return IMPL.migration_get_by_instance_and_status(context,
                                                     instance_uuid,
                                                     status)


def migration_get_all_unconfirmed(context, confirm_window):
    """Finds all unconfirmed migrations within the confirmation window."""
    return IMPL.migration_get_all_unconfirmed(context, confirm_window)


####################


def fixed_ip_associate(context, address, instance_id, network_id=None,
                       reserved=False):
    """Associate fixed ip to instance.

    Raises if fixed ip is not available.

    """
    return IMPL.fixed_ip_associate(context, address, instance_id,
                                   network_id, reserved)


def fixed_ip_associate_pool(context, network_id, instance_id=None, host=None):
    """Find free ip in network and associate it to instance or host.

    Raises if one is not available.

    """
    return IMPL.fixed_ip_associate_pool(context, network_id,
                                        instance_id, host)


def fixed_ip_create(context, values):
    """Create a fixed ip from the values dictionary."""
    return IMPL.fixed_ip_create(context, values)


def fixed_ip_bulk_create(context, ips):
    """Create a lot of fixed ips from the values dictionary."""
    return IMPL.fixed_ip_bulk_create(context, ips)


def fixed_ip_disassociate(context, address):
    """Disassociate a fixed ip from an instance by address."""
    return IMPL.fixed_ip_disassociate(context, address)


def fixed_ip_disassociate_all_by_timeout(context, host, time):
    """Disassociate old fixed ips from host."""
    return IMPL.fixed_ip_disassociate_all_by_timeout(context, host, time)


def fixed_ip_get(context, id):
    """Get fixed ip by id or raise if it does not exist."""
    return IMPL.fixed_ip_get(context, id)


def fixed_ip_get_all(context):
    """Get all defined fixed ips."""
    return IMPL.fixed_ip_get_all(context)


def fixed_ip_get_by_address(context, address):
    """Get a fixed ip by address or raise if it does not exist."""
    return IMPL.fixed_ip_get_by_address(context, address)


def fixed_ip_get_by_instance(context, instance_id):
    """Get fixed ips by instance or raise if none exist."""
    return IMPL.fixed_ip_get_by_instance(context, instance_id)


def fixed_ip_get_by_network_host(context, network_id, host):
    """Get fixed ip for a host in a network."""
    return IMPL.fixed_ip_get_by_network_host(context, network_id, host)


def fixed_ips_by_virtual_interface(context, vif_id):
    """Get fixed ips by virtual interface or raise if none exist."""
    return IMPL.fixed_ips_by_virtual_interface(context, vif_id)


def fixed_ip_get_network(context, address):
    """Get a network for a fixed ip by address."""
    return IMPL.fixed_ip_get_network(context, address)


def fixed_ip_update(context, address, values):
    """Update a fixed ip by address or raise if it does not exist."""
    return IMPL.fixed_ip_update(context, address, values)


####################


def virtual_interface_create(context, values):
    """Create a virtual interface record in the database."""
    return IMPL.virtual_interface_create(context, values)


def virtual_interface_get(context, vif_id):
    """Gets a virtual interface from the table."""
    return IMPL.virtual_interface_get(context, vif_id)


def virtual_interface_get_by_address(context, address):
    """Gets a virtual interface from the table filtering on address."""
    return IMPL.virtual_interface_get_by_address(context, address)


def virtual_interface_get_by_uuid(context, vif_uuid):
    """Gets a virtual interface from the table filtering on vif uuid."""
    return IMPL.virtual_interface_get_by_uuid(context, vif_uuid)


def virtual_interface_get_by_instance(context, instance_id):
    """Gets all virtual_interfaces for instance."""
    return IMPL.virtual_interface_get_by_instance(context, instance_id)


def virtual_interface_get_by_instance_and_network(context, instance_id,
                                                  network_id):
    """Gets the virtual interfaces for an instance on a given network."""
    return IMPL.virtual_interface_get_by_instance_and_network(context,
                                                              instance_id,
                                                              network_id)


def virtual_interface_delete(context, vif_id):
    """Delete virtual interface record from the database."""
    return IMPL.virtual_interface_delete(context, vif_id)


def virtual_interface_delete_by_instance(context, instance_id):
    """Delete virtual interface records associated with instance."""
    return IMPL.virtual_interface_delete_by_instance(context, instance_id)


def virtual_interface_get_all(context):
    """Gets all virtual interfaces from the table."""
    return IMPL.virtual_interface_get_all(context)


####################


def instance_create(context, values):
    """Create an instance from the values dictionary."""
    return IMPL.instance_create(context, values)


def instance_data_get_for_project(context, project_id):
    """Get (instance_count, total_cores, total_ram) for project."""
    return IMPL.instance_data_get_for_project(context, project_id)


def instance_destroy(context, instance_id):
    """Destroy the instance or raise if it does not exist."""
    return IMPL.instance_destroy(context, instance_id)


def instance_get_by_uuid(context, uuid):
    """Get an instance or raise if it does not exist."""
    return IMPL.instance_get_by_uuid(context, uuid)


def instance_get(context, instance_id):
    """Get an instance or raise if it does not exist."""
    return IMPL.instance_get(context, instance_id)


def instance_get_all(context):
    """Get all instances."""
    return IMPL.instance_get_all(context)


def instance_get_all_by_filters(context, filters, sort_key='created_at',
                                sort_dir='desc'):
    """Get all instances that match all filters."""
    return IMPL.instance_get_all_by_filters(context, filters, sort_key,
                                            sort_dir)


def instance_get_active_by_window(context, begin, end=None, project_id=None):
    """Get instances active during a certain time window.

    Specifying a project_id will filter for a certain project.

    """
    return IMPL.instance_get_active_by_window(context, begin, end, project_id)


def instance_get_active_by_window_joined(context, begin, end=None,
                                         project_id=None):
    """Get instances and joins active during a certain time window.

    Specifying a project_id will filter for a certain project.

    """
    return IMPL.instance_get_active_by_window_joined(context, begin, end,
                                                     project_id)


def instance_get_all_by_project(context, project_id):
    """Get all instances belonging to a project."""
    return IMPL.instance_get_all_by_project(context, project_id)


def instance_get_all_by_host(context, host):
    """Get all instances belonging to a host."""
    return IMPL.instance_get_all_by_host(context, host)


def instance_get_all_by_reservation(context, reservation_id):
    """Get all instances belonging to a reservation."""
    return IMPL.instance_get_all_by_reservation(context, reservation_id)


def instance_get_floating_address(context, instance_id):
    """Get the first floating ip address of an instance."""
    return IMPL.instance_get_floating_address(context, instance_id)


def instance_get_all_hung_in_rebooting(context, reboot_window):
    """Get all instances stuck in a rebooting state."""
    return IMPL.instance_get_all_hung_in_rebooting(context, reboot_window)


def instance_test_and_set(context, instance_id, attr, ok_states, new_state):
    """Atomically check if an instance is in a valid state, and if it is,
    set the instance into a new state.
""" return IMPL.instance_test_and_set( context, instance_id, attr, ok_states, new_state) def instance_update(context, instance_id, values): """Set the given properties on an instance and update it. Raises NotFound if instance does not exist. """ return IMPL.instance_update(context, instance_id, values) def instance_add_security_group(context, instance_id, security_group_id): """Associate the given security group with the given instance.""" return IMPL.instance_add_security_group(context, instance_id, security_group_id) def instance_remove_security_group(context, instance_id, security_group_id): """Disassociate the given security group from the given instance.""" return IMPL.instance_remove_security_group(context, instance_id, security_group_id) def instance_action_create(context, values): """Create an instance action from the values dictionary.""" return IMPL.instance_action_create(context, values) def instance_get_actions(context, instance_uuid): """Get instance actions by instance uuid.""" return IMPL.instance_get_actions(context, instance_uuid) def instance_get_id_to_uuid_mapping(context, ids): """Return a dictionary containing 'ID: UUID' given the ids""" return IMPL.instance_get_id_to_uuid_mapping(context, ids) ################### def instance_info_cache_create(context, values): """Create a new instance cache record in the table. :param context: = request context object :param values: = dict containing column values """ return IMPL.instance_info_cache_create(context, values) def instance_info_cache_get(context, instance_uuid): """Gets an instance info cache from the table. :param instance_uuid: = uuid of the info cache's instance """ return IMPL.instance_info_cache_get(context, instance_uuid) def instance_info_cache_update(context, instance_uuid, values): """Update an instance info cache record in the table. :param instance_uuid: = uuid of info cache's instance :param values: = dict containing column values to update """ return IMPL.instance_info_cache_update(context, instance_uuid, values) def instance_info_cache_delete(context, instance_uuid): """Deletes an existing instance_info_cache record :param instance_uuid: = uuid of the instance tied to the cache record """ return IMPL.instance_info_cache_delete(context, instance_uuid) ################### def key_pair_create(context, values): """Create a key_pair from the values dictionary.""" return IMPL.key_pair_create(context, values) def key_pair_destroy(context, user_id, name): """Destroy the key_pair or raise if it does not exist.""" return IMPL.key_pair_destroy(context, user_id, name) def key_pair_destroy_all_by_user(context, user_id): """Destroy all key_pairs by user.""" return IMPL.key_pair_destroy_all_by_user(context, user_id) def key_pair_get(context, user_id, name): """Get a key_pair or raise if it does not exist.""" return IMPL.key_pair_get(context, user_id, name) def key_pair_get_all_by_user(context, user_id): """Get all key_pairs by user.""" return IMPL.key_pair_get_all_by_user(context, user_id) #################### def network_associate(context, project_id, force=False): """Associate a free network to a project.""" return IMPL.network_associate(context, project_id, force) def network_count(context): """Return the number of networks.""" return IMPL.network_count(context) def network_count_reserved_ips(context, network_id): """Return the number of reserved ips in the network.""" return IMPL.network_count_reserved_ips(context, network_id) def network_create_safe(context, values): """Create a network from the values dict. 
    The network is only returned if the create succeeds. If the create
    violates constraints because the network already exists, no
    exception is raised.

    """
    return IMPL.network_create_safe(context, values)


def network_delete_safe(context, network_id):
    """Delete network with key network_id.

    This method assumes that the network is not associated with
    any project.

    """
    return IMPL.network_delete_safe(context, network_id)


def network_create_fixed_ips(context, network_id, num_vpn_clients):
    """Create the ips for the network, reserving specified ips."""
    return IMPL.network_create_fixed_ips(context, network_id,
                                         num_vpn_clients)


def network_disassociate(context, network_id):
    """Disassociate the network from project or raise if it does not
    exist."""
    return IMPL.network_disassociate(context, network_id)


def network_get(context, network_id):
    """Get a network or raise if it does not exist."""
    return IMPL.network_get(context, network_id)


def network_get_all(context):
    """Return all defined networks."""
    return IMPL.network_get_all(context)


def network_get_all_by_uuids(context, network_uuids, project_id=None):
    """Return networks by ids."""
    return IMPL.network_get_all_by_uuids(context, network_uuids, project_id)


# pylint: disable=C0103
def network_get_associated_fixed_ips(context, network_id, host=None):
    """Get all network's ips that have been associated."""
    return IMPL.network_get_associated_fixed_ips(context, network_id, host)


def network_get_by_bridge(context, bridge):
    """Get a network by bridge or raise if it does not exist."""
    return IMPL.network_get_by_bridge(context, bridge)


def network_get_by_uuid(context, uuid):
    """Get a network by uuid or raise if it does not exist."""
    return IMPL.network_get_by_uuid(context, uuid)


def network_get_by_cidr(context, cidr):
    """Get a network by cidr or raise if it does not exist."""
    return IMPL.network_get_by_cidr(context, cidr)


def network_get_by_instance(context, instance_id):
    """Get a network by instance id or raise if it does not exist."""
    return IMPL.network_get_by_instance(context, instance_id)


def network_get_all_by_instance(context, instance_id):
    """Get all networks by instance id or raise if none exist."""
    return IMPL.network_get_all_by_instance(context, instance_id)


def network_get_all_by_host(context, host):
    """All networks for which the given host is the network host."""
    return IMPL.network_get_all_by_host(context, host)


def network_get_index(context, network_id):
    """Get non-conflicting index for network."""
    return IMPL.network_get_index(context, network_id)


def network_set_cidr(context, network_id, cidr):
    """Set the Classless Inter-Domain Routing (CIDR) for the network."""
    return IMPL.network_set_cidr(context, network_id, cidr)


def network_set_host(context, network_id, host_id):
    """Safely set the host for network."""
    return IMPL.network_set_host(context, network_id, host_id)


def network_update(context, network_id, values):
    """Set the given properties on a network and update it.

    Raises NotFound if network does not exist.

    """
    return IMPL.network_update(context, network_id, values)


###################


def queue_get_for(context, topic, physical_node_id):
    """Return a channel to send a message to a node with a topic."""
    return IMPL.queue_get_for(context, topic, physical_node_id)


###################


def iscsi_target_count_by_host(context, host):
    """Return count of export devices."""
    return IMPL.iscsi_target_count_by_host(context, host)


def iscsi_target_create_safe(context, values):
    """Create an iscsi_target from the values dictionary.

    The device is not returned.
If the create violates the unique constraints because the iscsi_target and host already exist, no exception is raised. """ return IMPL.iscsi_target_create_safe(context, values) ############### def auth_token_destroy(context, token_id): """Destroy an auth token.""" return IMPL.auth_token_destroy(context, token_id) def auth_token_get(context, token_hash): """Retrieves a token given the hash representing it.""" return IMPL.auth_token_get(context, token_hash) def auth_token_update(context, token_hash, values): """Updates a token given the hash representing it.""" return IMPL.auth_token_update(context, token_hash, values) def auth_token_create(context, token): """Creates a new token.""" return IMPL.auth_token_create(context, token) ################### def quota_create(context, project_id, resource, limit): """Create a quota for the given project and resource.""" return IMPL.quota_create(context, project_id, resource, limit) def quota_get(context, project_id, resource): """Retrieve a quota or raise if it does not exist.""" return IMPL.quota_get(context, project_id, resource) def quota_get_all_by_project(context, project_id): """Retrieve all quotas associated with a given project.""" return IMPL.quota_get_all_by_project(context, project_id) def quota_update(context, project_id, resource, limit): """Update a quota or raise if it does not exist.""" return IMPL.quota_update(context, project_id, resource, limit) def quota_destroy(context, project_id, resource): """Destroy the quota or raise if it does not exist.""" return IMPL.quota_destroy(context, project_id, resource) def quota_destroy_all_by_project(context, project_id): """Destroy all quotas associated with a given project.""" return IMPL.quota_get_all_by_project(context, project_id) ################### def quota_class_create(context, class_name, resource, limit): """Create a quota class for the given name and resource.""" return IMPL.quota_class_create(context, class_name, resource, limit) def quota_class_get(context, class_name, resource): """Retrieve a quota class or raise if it does not exist.""" return IMPL.quota_class_get(context, class_name, resource) def quota_class_get_all_by_name(context, class_name): """Retrieve all quotas associated with a given quota class.""" return IMPL.quota_class_get_all_by_name(context, class_name) def quota_class_update(context, class_name, resource, limit): """Update a quota class or raise if it does not exist.""" return IMPL.quota_class_update(context, class_name, resource, limit) def quota_class_destroy(context, class_name, resource): """Destroy the quota class or raise if it does not exist.""" return IMPL.quota_class_destroy(context, class_name, resource) def quota_class_destroy_all_by_name(context, class_name): """Destroy all quotas associated with a given quota class.""" return IMPL.quota_class_destroy_all_by_name(context, class_name) ################### def volume_allocate_iscsi_target(context, volume_id, host): """Atomically allocate a free iscsi_target from the pool.""" return IMPL.volume_allocate_iscsi_target(context, volume_id, host) def volume_attached(context, volume_id, instance_id, mountpoint): """Ensure that a volume is set as attached.""" return IMPL.volume_attached(context, volume_id, instance_id, mountpoint) def volume_create(context, values): """Create a volume from the values dictionary.""" return IMPL.volume_create(context, values) def volume_data_get_for_project(context, project_id): """Get (volume_count, gigabytes) for project.""" return IMPL.volume_data_get_for_project(context, 
project_id) def volume_destroy(context, volume_id): """Destroy the volume or raise if it does not exist.""" return IMPL.volume_destroy(context, volume_id) def volume_detached(context, volume_id): """Ensure that a volume is set as detached.""" return IMPL.volume_detached(context, volume_id) def volume_get(context, volume_id): """Get a volume or raise if it does not exist.""" return IMPL.volume_get(context, volume_id) def volume_get_all(context): """Get all volumes.""" return IMPL.volume_get_all(context) def volume_get_all_by_host(context, host): """Get all volumes belonging to a host.""" return IMPL.volume_get_all_by_host(context, host) def volume_get_all_by_instance(context, instance_id): """Get all volumes belonging to a instance.""" return IMPL.volume_get_all_by_instance(context, instance_id) def volume_get_all_by_project(context, project_id): """Get all volumes belonging to a project.""" return IMPL.volume_get_all_by_project(context, project_id) def volume_get_by_ec2_id(context, ec2_id): """Get a volume by ec2 id.""" return IMPL.volume_get_by_ec2_id(context, ec2_id) def volume_get_instance(context, volume_id): """Get the instance that a volume is attached to.""" return IMPL.volume_get_instance(context, volume_id) def volume_get_iscsi_target_num(context, volume_id): """Get the target num (tid) allocated to the volume.""" return IMPL.volume_get_iscsi_target_num(context, volume_id) def volume_update(context, volume_id, values): """Set the given properties on an volume and update it. Raises NotFound if volume does not exist. """ return IMPL.volume_update(context, volume_id, values) #################### def snapshot_create(context, values): """Create a snapshot from the values dictionary.""" return IMPL.snapshot_create(context, values) def snapshot_destroy(context, snapshot_id): """Destroy the snapshot or raise if it does not exist.""" return IMPL.snapshot_destroy(context, snapshot_id) def snapshot_get(context, snapshot_id): """Get a snapshot or raise if it does not exist.""" return IMPL.snapshot_get(context, snapshot_id) def snapshot_get_all(context): """Get all snapshots.""" return IMPL.snapshot_get_all(context) def snapshot_get_all_by_project(context, project_id): """Get all snapshots belonging to a project.""" return IMPL.snapshot_get_all_by_project(context, project_id) def snapshot_get_all_for_volume(context, volume_id): """Get all snapshots for a volume.""" return IMPL.snapshot_get_all_for_volume(context, volume_id) def snapshot_update(context, snapshot_id, values): """Set the given properties on an snapshot and update it. Raises NotFound if snapshot does not exist. """ return IMPL.snapshot_update(context, snapshot_id, values) #################### def block_device_mapping_create(context, values): """Create an entry of block device mapping""" return IMPL.block_device_mapping_create(context, values) def block_device_mapping_update(context, bdm_id, values): """Update an entry of block device mapping""" return IMPL.block_device_mapping_update(context, bdm_id, values) def block_device_mapping_update_or_create(context, values): """Update an entry of block device mapping. 
If not existed, create a new entry""" return IMPL.block_device_mapping_update_or_create(context, values) def block_device_mapping_get_all_by_instance(context, instance_id): """Get all block device mapping belonging to a instance""" return IMPL.block_device_mapping_get_all_by_instance(context, instance_id) def block_device_mapping_destroy(context, bdm_id): """Destroy the block device mapping.""" return IMPL.block_device_mapping_destroy(context, bdm_id) def block_device_mapping_destroy_by_instance_and_volume(context, instance_id, volume_id): """Destroy the block device mapping or raise if it does not exist.""" return IMPL.block_device_mapping_destroy_by_instance_and_volume( context, instance_id, volume_id) #################### def security_group_get_all(context): """Get all security groups.""" return IMPL.security_group_get_all(context) def security_group_get(context, security_group_id): """Get security group by its id.""" return IMPL.security_group_get(context, security_group_id) def security_group_get_by_name(context, project_id, group_name): """Returns a security group with the specified name from a project.""" return IMPL.security_group_get_by_name(context, project_id, group_name) def security_group_get_by_project(context, project_id): """Get all security groups belonging to a project.""" return IMPL.security_group_get_by_project(context, project_id) def security_group_get_by_instance(context, instance_id): """Get security groups to which the instance is assigned.""" return IMPL.security_group_get_by_instance(context, instance_id) def security_group_exists(context, project_id, group_name): """Indicates if a group name exists in a project.""" return IMPL.security_group_exists(context, project_id, group_name) def security_group_in_use(context, group_id): """Indicates if a security group is currently in use.""" return IMPL.security_group_in_use(context, group_id) def security_group_create(context, values): """Create a new security group.""" return IMPL.security_group_create(context, values) def security_group_destroy(context, security_group_id): """Deletes a security group.""" return IMPL.security_group_destroy(context, security_group_id) #################### def security_group_rule_create(context, values): """Create a new security group.""" return IMPL.security_group_rule_create(context, values) def security_group_rule_get_by_security_group(context, security_group_id): """Get all rules for a a given security group.""" return IMPL.security_group_rule_get_by_security_group(context, security_group_id) def security_group_rule_get_by_security_group_grantee(context, security_group_id): """Get all rules that grant access to the given security group.""" return IMPL.security_group_rule_get_by_security_group_grantee(context, security_group_id) def security_group_rule_destroy(context, security_group_rule_id): """Deletes a security group rule.""" return IMPL.security_group_rule_destroy(context, security_group_rule_id) def security_group_rule_get(context, security_group_rule_id): """Gets a security group rule.""" return IMPL.security_group_rule_get(context, security_group_rule_id) ################### def provider_fw_rule_create(context, rule): """Add a firewall rule at the provider level (all hosts & instances).""" return IMPL.provider_fw_rule_create(context, rule) def provider_fw_rule_get_all(context): """Get all provider-level firewall rules.""" return IMPL.provider_fw_rule_get_all(context) def provider_fw_rule_destroy(context, rule_id): """Delete a provider firewall rule from the database.""" return 
IMPL.provider_fw_rule_destroy(context, rule_id) ################### def user_get(context, id): """Get user by id.""" return IMPL.user_get(context, id) def user_get_by_uid(context, uid): """Get user by uid.""" return IMPL.user_get_by_uid(context, uid) def user_get_by_access_key(context, access_key): """Get user by access key.""" return IMPL.user_get_by_access_key(context, access_key) def user_create(context, values): """Create a new user.""" return IMPL.user_create(context, values) def user_delete(context, id): """Delete a user.""" return IMPL.user_delete(context, id) def user_get_all(context): """Create a new user.""" return IMPL.user_get_all(context) def user_add_role(context, user_id, role): """Add another global role for user.""" return IMPL.user_add_role(context, user_id, role) def user_remove_role(context, user_id, role): """Remove global role from user.""" return IMPL.user_remove_role(context, user_id, role) def user_get_roles(context, user_id): """Get global roles for user.""" return IMPL.user_get_roles(context, user_id) def user_add_project_role(context, user_id, project_id, role): """Add project role for user.""" return IMPL.user_add_project_role(context, user_id, project_id, role) def user_remove_project_role(context, user_id, project_id, role): """Remove project role from user.""" return IMPL.user_remove_project_role(context, user_id, project_id, role) def user_get_roles_for_project(context, user_id, project_id): """Return list of roles a user holds on project.""" return IMPL.user_get_roles_for_project(context, user_id, project_id) def user_update(context, user_id, values): """Update user.""" return IMPL.user_update(context, user_id, values) ################### def project_get(context, id): """Get project by id.""" return IMPL.project_get(context, id) def project_create(context, values): """Create a new project.""" return IMPL.project_create(context, values) def project_add_member(context, project_id, user_id): """Add user to project.""" return IMPL.project_add_member(context, project_id, user_id) def project_get_all(context): """Get all projects.""" return IMPL.project_get_all(context) def project_get_by_user(context, user_id): """Get all projects of which the given user is a member.""" return IMPL.project_get_by_user(context, user_id) def project_remove_member(context, project_id, user_id): """Remove the given user from the given project.""" return IMPL.project_remove_member(context, project_id, user_id) def project_update(context, project_id, values): """Update Remove the given user from the given project.""" return IMPL.project_update(context, project_id, values) def project_delete(context, project_id): """Delete project.""" return IMPL.project_delete(context, project_id) def project_get_networks(context, project_id, associate=True): """Return the network associated with the project. If associate is true, it will attempt to associate a new network if one is not found, otherwise it returns None. 
""" return IMPL.project_get_networks(context, project_id, associate) ################### def console_pool_create(context, values): """Create console pool.""" return IMPL.console_pool_create(context, values) def console_pool_get(context, pool_id): """Get a console pool.""" return IMPL.console_pool_get(context, pool_id) def console_pool_get_by_host_type(context, compute_host, proxy_host, console_type): """Fetch a console pool for a given proxy host, compute host, and type.""" return IMPL.console_pool_get_by_host_type(context, compute_host, proxy_host, console_type) def console_pool_get_all_by_host_type(context, host, console_type): """Fetch all pools for given proxy host and type.""" return IMPL.console_pool_get_all_by_host_type(context, host, console_type) def console_create(context, values): """Create a console.""" return IMPL.console_create(context, values) def console_delete(context, console_id): """Delete a console.""" return IMPL.console_delete(context, console_id) def console_get_by_pool_instance(context, pool_id, instance_id): """Get console entry for a given instance and pool.""" return IMPL.console_get_by_pool_instance(context, pool_id, instance_id) def console_get_all_by_instance(context, instance_id): """Get consoles for a given instance.""" return IMPL.console_get_all_by_instance(context, instance_id) def console_get(context, console_id, instance_id=None): """Get a specific console (possibly on a given instance).""" return IMPL.console_get(context, console_id, instance_id) ################## def instance_type_create(context, values): """Create a new instance type.""" return IMPL.instance_type_create(context, values) def instance_type_get_all(context, inactive=False, filters=None): """Get all instance types.""" return IMPL.instance_type_get_all( context, inactive=inactive, filters=filters) def instance_type_get(context, id): """Get instance type by id.""" return IMPL.instance_type_get(context, id) def instance_type_get_by_name(context, name): """Get instance type by name.""" return IMPL.instance_type_get_by_name(context, name) def instance_type_get_by_flavor_id(context, id): """Get instance type by name.""" return IMPL.instance_type_get_by_flavor_id(context, id) def instance_type_destroy(context, name): """Delete a instance type.""" return IMPL.instance_type_destroy(context, name) #################### def cell_create(context, values): """Create a new child Cell entry.""" return IMPL.cell_create(context, values) def cell_update(context, cell_id, values): """Update a child Cell entry.""" return IMPL.cell_update(context, cell_id, values) def cell_delete(context, cell_id): """Delete a child Cell.""" return IMPL.cell_delete(context, cell_id) def cell_get(context, cell_id): """Get a specific child Cell.""" return IMPL.cell_get(context, cell_id) def cell_get_all(context): """Get all child Cells.""" return IMPL.cell_get_all(context) #################### def instance_metadata_get(context, instance_id): """Get all metadata for an instance.""" return IMPL.instance_metadata_get(context, instance_id) def instance_metadata_delete(context, instance_id, key): """Delete the given metadata item.""" IMPL.instance_metadata_delete(context, instance_id, key) def instance_metadata_update(context, instance_id, metadata, delete): """Update metadata if it exists, otherwise create it.""" IMPL.instance_metadata_update(context, instance_id, metadata, delete) #################### def agent_build_create(context, values): """Create a new agent build entry.""" return IMPL.agent_build_create(context, values) def 
agent_build_get_by_triple(context, hypervisor, os, architecture): """Get agent build by hypervisor/OS/architecture triple.""" return IMPL.agent_build_get_by_triple(context, hypervisor, os, architecture) def agent_build_get_all(context): """Get all agent builds.""" return IMPL.agent_build_get_all(context) def agent_build_destroy(context, agent_update_id): """Destroy agent build entry.""" IMPL.agent_build_destroy(context, agent_update_id) def agent_build_update(context, agent_build_id, values): """Update agent build entry.""" IMPL.agent_build_update(context, agent_build_id, values) #################### def bw_usage_get_by_macs(context, macs, start_period): """Return bw usages for an instance in a given audit period.""" return IMPL.bw_usage_get_by_macs(context, macs, start_period) def bw_usage_update(context, mac, start_period, bw_in, bw_out): """Update cached bw usage for an instance and network Creates new record if needed.""" return IMPL.bw_usage_update(context, mac, start_period, bw_in, bw_out) #################### def instance_type_extra_specs_get(context, instance_type_id): """Get all extra specs for an instance type.""" return IMPL.instance_type_extra_specs_get(context, instance_type_id) def instance_type_extra_specs_delete(context, instance_type_id, key): """Delete the given extra specs item.""" IMPL.instance_type_extra_specs_delete(context, instance_type_id, key) def instance_type_extra_specs_update_or_create(context, instance_type_id, extra_specs): """Create or update instance type extra specs. This adds or modifies the key/value pairs specified in the extra specs dict argument""" IMPL.instance_type_extra_specs_update_or_create(context, instance_type_id, extra_specs) ################## def volume_metadata_get(context, volume_id): """Get all metadata for a volume.""" return IMPL.volume_metadata_get(context, volume_id) def volume_metadata_delete(context, volume_id, key): """Delete the given metadata item.""" IMPL.volume_metadata_delete(context, volume_id, key) def volume_metadata_update(context, volume_id, metadata, delete): """Update metadata if it exists, otherwise create it.""" IMPL.volume_metadata_update(context, volume_id, metadata, delete) ################## def volume_type_create(context, values): """Create a new volume type.""" return IMPL.volume_type_create(context, values) def volume_type_get_all(context, inactive=False): """Get all volume types.""" return IMPL.volume_type_get_all(context, inactive) def volume_type_get(context, id): """Get volume type by id.""" return IMPL.volume_type_get(context, id) def volume_type_get_by_name(context, name): """Get volume type by name.""" return IMPL.volume_type_get_by_name(context, name) def volume_type_destroy(context, name): """Delete a volume type.""" return IMPL.volume_type_destroy(context, name) #################### def volume_type_extra_specs_get(context, volume_type_id): """Get all extra specs for a volume type.""" return IMPL.volume_type_extra_specs_get(context, volume_type_id) def volume_type_extra_specs_delete(context, volume_type_id, key): """Delete the given extra specs item.""" IMPL.volume_type_extra_specs_delete(context, volume_type_id, key) def volume_type_extra_specs_update_or_create(context, volume_type_id, extra_specs): """Create or update volume type extra specs. 
This adds or modifies the key/value pairs specified in the extra specs dict argument""" IMPL.volume_type_extra_specs_update_or_create(context, volume_type_id, extra_specs) ################### def s3_image_get(context, image_id): """Find local s3 image represented by the provided id""" return IMPL.s3_image_get(context, image_id) def s3_image_get_by_uuid(context, image_uuid): """Find local s3 image represented by the provided uuid""" return IMPL.s3_image_get_by_uuid(context, image_uuid) def s3_image_create(context, image_uuid): """Create local s3 image represented by provided uuid""" return IMPL.s3_image_create(context, image_uuid) #################### def sm_backend_conf_create(context, values): """Create a new SM Backend Config entry.""" return IMPL.sm_backend_conf_create(context, values) def sm_backend_conf_update(context, sm_backend_conf_id, values): """Update a SM Backend Config entry.""" return IMPL.sm_backend_conf_update(context, sm_backend_conf_id, values) def sm_backend_conf_delete(context, sm_backend_conf_id): """Delete a SM Backend Config.""" return IMPL.sm_backend_conf_delete(context, sm_backend_conf_id) def sm_backend_conf_get(context, sm_backend_conf_id): """Get a specific SM Backend Config.""" return IMPL.sm_backend_conf_get(context, sm_backend_conf_id) def sm_backend_conf_get_by_sr(context, sr_uuid): """Get a specific SM Backend Config.""" return IMPL.sm_backend_conf_get_by_sr(context, sr_uuid) def sm_backend_conf_get_all(context): """Get all SM Backend Configs.""" return IMPL.sm_backend_conf_get_all(context) #################### def sm_flavor_create(context, values): """Create a new SM Flavor entry.""" return IMPL.sm_flavor_create(context, values) def sm_flavor_update(context, sm_flavor_id, values): """Update a SM Flavor entry.""" return IMPL.sm_flavor_update(context, values) def sm_flavor_delete(context, sm_flavor_id): """Delete a SM Flavor.""" return IMPL.sm_flavor_delete(context, sm_flavor_id) def sm_flavor_get(context, sm_flavor): """Get a specific SM Flavor.""" return IMPL.sm_flavor_get(context, sm_flavor) def sm_flavor_get_all(context): """Get all SM Flavors.""" return IMPL.sm_flavor_get_all(context) #################### def sm_volume_create(context, values): """Create a new child Zone entry.""" return IMPL.sm_volume_create(context, values) def sm_volume_update(context, volume_id, values): """Update a child Zone entry.""" return IMPL.sm_volume_update(context, values) def sm_volume_delete(context, volume_id): """Delete a child Zone.""" return IMPL.sm_volume_delete(context, volume_id) def sm_volume_get(context, volume_id): """Get a specific child Zone.""" return IMPL.sm_volume_get(context, volume_id) def sm_volume_get_all(context): """Get all child Zones.""" return IMPL.sm_volume_get_all(context) #################### def aggregate_create(context, values, metadata=None): """Create a new aggregate with metadata.""" return IMPL.aggregate_create(context, values, metadata) def aggregate_get(context, aggregate_id): """Get a specific aggregate by id.""" return IMPL.aggregate_get(context, aggregate_id) def aggregate_get_by_host(context, host): """Get a specific aggregate by host""" return IMPL.aggregate_get_by_host(context, host) def aggregate_update(context, aggregate_id, values): """Update the attributes of an aggregates. 
If values contains a metadata key, it updates the aggregate metadata too.""" return IMPL.aggregate_update(context, aggregate_id, values) def aggregate_delete(context, aggregate_id): """Delete an aggregate.""" return IMPL.aggregate_delete(context, aggregate_id) def aggregate_get_all(context): """Get all aggregates.""" return IMPL.aggregate_get_all(context) def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False): """Add/update metadata. If set_delete=True, it adds only.""" IMPL.aggregate_metadata_add(context, aggregate_id, metadata, set_delete) def aggregate_metadata_get(context, aggregate_id): """Get metadata for the specified aggregate.""" return IMPL.aggregate_metadata_get(context, aggregate_id) def aggregate_metadata_delete(context, aggregate_id, key): """Delete the given metadata key.""" IMPL.aggregate_metadata_delete(context, aggregate_id, key) def aggregate_host_add(context, aggregate_id, host): """Add host to the aggregate.""" IMPL.aggregate_host_add(context, aggregate_id, host) def aggregate_host_get_all(context, aggregate_id): """Get hosts for the specified aggregate.""" return IMPL.aggregate_host_get_all(context, aggregate_id) def aggregate_host_delete(context, aggregate_id, host): """Delete the given host from the aggregate.""" IMPL.aggregate_host_delete(context, aggregate_id, host) #################### def instance_fault_create(context, values): """Create a new Instance Fault.""" return IMPL.instance_fault_create(context, values) def instance_fault_get_by_instance_uuids(context, instance_uuids): """Get all instance faults for the provided instance_uuids.""" return IMPL.instance_fault_get_by_instance_uuids(context, instance_uuids)
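

# Illustrative usage sketch (an editor's addition, not part of the original
# module): every function above is a thin facade that defers to the pluggable
# IMPL backend, so service code calls this module and never touches the
# database layer directly. The context helper and project name below are
# assumptions for illustration only.
#
#     from nova import context
#     from nova import db
#
#     ctxt = context.get_admin_context()
#     db.quota_create(ctxt, 'demo-project', 'instances', 10)
#     quotas = db.quota_get_all_by_project(ctxt, 'demo-project')
#     db.quota_destroy_all_by_project(ctxt, 'demo-project')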
./CrossVul/dataset_final_sorted/CWE-264/py/bad_3632_2
crossvul-python_data_bad_3695_3
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy
import datetime

from keystone.common import sql
from keystone import exception
from keystone import token


class TokenModel(sql.ModelBase, sql.DictBase):
    __tablename__ = 'token'
    id = sql.Column(sql.String(64), primary_key=True)
    expires = sql.Column(sql.DateTime(), default=None)
    extra = sql.Column(sql.JsonBlob())

    @classmethod
    def from_dict(cls, token_dict):
        # shove any non-indexed properties into extra
        extra = copy.deepcopy(token_dict)
        data = {}
        for k in ('id', 'expires'):
            data[k] = extra.pop(k, None)
        data['extra'] = extra
        return cls(**data)

    def to_dict(self):
        out = copy.deepcopy(self.extra)
        out['id'] = self.id
        out['expires'] = self.expires
        return out


class Token(sql.Base, token.Driver):
    # Public interface
    def get_token(self, token_id):
        session = self.get_session()
        token_ref = session.query(TokenModel).filter_by(id=token_id).first()
        now = datetime.datetime.utcnow()
        if token_ref and (not token_ref.expires or now < token_ref.expires):
            return token_ref.to_dict()
        else:
            raise exception.TokenNotFound(token_id=token_id)

    def create_token(self, token_id, data):
        data_copy = copy.deepcopy(data)
        if 'expires' not in data_copy:
            data_copy['expires'] = self._get_default_expire_time()

        token_ref = TokenModel.from_dict(data_copy)
        token_ref.id = token_id

        session = self.get_session()
        with session.begin():
            session.add(token_ref)
            session.flush()
        return token_ref.to_dict()

    def delete_token(self, token_id):
        session = self.get_session()
        token_ref = session.query(TokenModel)\
                           .filter_by(id=token_id)\
                           .first()
        if not token_ref:
            raise exception.TokenNotFound(token_id=token_id)

        with session.begin():
            session.delete(token_ref)
            session.flush()
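

# Illustrative round trip (an editor's sketch, not part of the backend
# above): exercises create/get/delete on the Token driver. It assumes
# sql.Base has already been configured with a reachable database session;
# the token id and payload are arbitrary example values.
import uuid


def _demo_token_roundtrip(driver):
    token_id = uuid.uuid4().hex
    expires = datetime.datetime.utcnow() + datetime.timedelta(hours=1)
    data = {'id': token_id, 'expires': expires, 'user': {'id': 'demo-user'}}
    created = driver.create_token(token_id, data)
    # non-indexed keys such as 'user' ride along in the JSON 'extra' column
    assert driver.get_token(token_id)['user'] == created['user']
    driver.delete_token(token_id)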
./CrossVul/dataset_final_sorted/CWE-264/py/bad_3695_3
crossvul-python_data_good_5236_0
#
#  Limited command Shell (lshell)
#
#  Copyright (C) 2008-2013 Ignace Mouzannar (ghantoos) <ghantoos@ghantoos.org>
#
#  This file is part of lshell
#
#  This program is free software: you can redistribute it and/or modify
#  it under the terms of the GNU General Public License as published by
#  the Free Software Foundation, either version 3 of the License, or
#  (at your option) any later version.
#
#  This program is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with this program.  If not, see <http://www.gnu.org/licenses/>.

import sys
import re
import os

# import lshell specifics
from lshell import utils


def warn_count(messagetype, command, conf, strict=None, ssh=None):
    """ Update the warning_counter, log and display a warning to the user
    """
    log = conf['logpath']

    if not ssh:
        if strict:
            conf['warning_counter'] -= 1
            if conf['warning_counter'] < 0:
                log.critical('*** forbidden %s -> "%s"'
                             % (messagetype, command))
                log.critical('*** Kicked out')
                sys.exit(1)
            else:
                log.critical('*** forbidden %s -> "%s"'
                             % (messagetype, command))
                sys.stderr.write('*** You have %s warning(s) left,'
                                 ' before getting kicked out.\n'
                                 % conf['warning_counter'])
                log.error('*** User warned, counter: %s'
                          % conf['warning_counter'])
                sys.stderr.write('This incident has been reported.\n')
        else:
            if not conf['quiet']:
                log.critical('*** forbidden %s: %s' % (messagetype, command))

    # if you are here, it means that you did something wrong. Return 1.
    return 1, conf


def check_path(line, conf, completion=None, ssh=None, strict=None):
    """ Check if a path is entered in the line. If so, it checks if the user
        is allowed to see this path. If the user is not allowed, it calls
        warn_count. In case of completion, it only returns 0 or 1.
    """
    allowed_path_re = str(conf['path'][0])
    denied_path_re = str(conf['path'][1][:-1])

    # split line depending on the operators
    sep = re.compile(r'\ |;|\||&')
    line = line.strip()
    line = sep.split(line)

    for item in line:
        # remove potential quotes or back-ticks
        item = re.sub(r'^["\'`]|["\'`]$', '', item)

        # remove potential $(), ${}, ``
        item = re.sub(r'^\$[\(\{]|[\)\}]$', '', item)

        # if item has been converted to something other than a string
        # or an int, reconvert it to a string
        if not isinstance(item, (str, int)):
            item = str(item)

        # replace "~" with home path
        item = os.path.expanduser(item)

        # expand shell wildcards using "echo"
        # i know, this is a bit nasty...
        if re.findall(r'\$|\*|\?', item):
            # remove quotes if available
            item = re.sub("\"|\'", "", item)
            import subprocess
            p = subprocess.Popen("`which echo` %s" % item,
                                 shell=True,
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            cout = p.stdout

            try:
                item = cout.readlines()[0].decode('utf8').split(' ')[0]
                item = item.strip()
                item = os.path.expandvars(item)
            except IndexError:
                conf['logpath'].critical('*** Internal error: command not '
                                         'executed')
                return 1, conf

        tomatch = os.path.realpath(item)
        if os.path.isdir(tomatch) and tomatch[-1] != '/':
            tomatch += '/'
        match_allowed = re.findall(allowed_path_re, tomatch)
        if denied_path_re:
            match_denied = re.findall(denied_path_re, tomatch)
        else:
            match_denied = None

        # if path not allowed
        # case path executed: warn, and return 1
        # case completion: return 1
        if not match_allowed or match_denied:
            if not completion:
                ret, conf = warn_count('path',
                                       tomatch,
                                       conf,
                                       strict=strict,
                                       ssh=ssh)
            return 1, conf

    if not completion:
        if not re.findall(allowed_path_re, os.getcwd() + '/'):
            ret, conf = warn_count('path',
                                   tomatch,
                                   conf,
                                   strict=strict,
                                   ssh=ssh)
            os.chdir(conf['home_path'])
            conf['promptprint'] = utils.updateprompt(os.getcwd(), conf)
            return 1, conf
    return 0, conf


def check_secure(line, conf, strict=None, ssh=None):
    """This method is used to check the content of the typed command.
    Its purpose is to forbid the user to override the lshell command
    restrictions.
    The forbidden characters are placed in the 'forbidden' variable.
    Feel free to update the list. Emptying it would be quite useless... :)

    A warning counter has been added, to kick a user out of lshell if he
    is warned more than X times (X being the 'warning_counter' variable).
    """
    # store original string
    oline = line

    # strip all spaces/tabs
    line = line.strip()

    # init return code
    returncode = 0

    # This logic is kept crudely simple on purpose.
    # At most we might match the same stanza twice
    # (for e.g. "'a'", 'a') but the converse would
    # require detecting single quotation stanzas
    # nested within double quotes and vice versa
    relist = re.findall(r'[^=]\"(.+)\"', line)
    relist2 = re.findall(r'[^=]\'(.+)\'', line)
    relist = relist + relist2
    for item in relist:
        if os.path.exists(item):
            ret_check_path, conf = check_path(item, conf, strict=strict)
            returncode += ret_check_path

    # ignore quoted text
    line = re.sub(r'\"(.+?)\"', '', line)
    line = re.sub(r'\'(.+?)\'', '', line)

    if re.findall('[:cntrl:].*\n', line):
        ret, conf = warn_count('syntax',
                               oline,
                               conf,
                               strict=strict,
                               ssh=ssh)
        return ret, conf

    for item in conf['forbidden']:
        # allow '&&' and '||' even if singles are forbidden
        if item in ['&', '|']:
            if re.findall("[^\%s]\%s[^\%s]" % (item, item, item), line):
                ret, conf = warn_count('syntax',
                                       oline,
                                       conf,
                                       strict=strict,
                                       ssh=ssh)
                return ret, conf
        else:
            if item in line:
                ret, conf = warn_count('syntax',
                                       oline,
                                       conf,
                                       strict=strict,
                                       ssh=ssh)
                return ret, conf

    # check if the line contains $(foo) executions, and check them
    executions = re.findall(r'\$\([^)]+[)]', line)
    for item in executions:
        # recurse on check_path
        ret_check_path, conf = check_path(item[2:-1].strip(),
                                          conf,
                                          strict=strict)
        returncode += ret_check_path

        # recurse on check_secure
        ret_check_secure, conf = check_secure(item[2:-1].strip(),
                                              conf,
                                              strict=strict)
        returncode += ret_check_secure

    # check for executions using back quotes '`'
    executions = re.findall(r'\`[^`]+[`]', line)
    for item in executions:
        ret_check_secure, conf = check_secure(item[1:-1].strip(),
                                              conf,
                                              strict=strict)
        returncode += ret_check_secure

    # check if the line contains ${foo=bar}, and check them
    curly = re.findall(r'\$\{[^}]+[}]', line)
    for item in curly:
        # split to get variable only, and remove last character "}"
        if re.findall(r'=|\+|\?|\-', item):
            variable = re.split(r'=|\+|\?|\-', item, 1)
        else:
            variable = item
        ret_check_path, conf = check_path(variable[1][:-1],
                                          conf,
                                          strict=strict)
        returncode += ret_check_path

    # if unknown commands were found, return 1 and don't execute the line
    if returncode > 0:
        return 1, conf
    # in case the $(foo) or `foo` command passed the above tests
    elif line.startswith('$(') or line.startswith('`'):
        return 0, conf

    # in case ';', '|' or '&' are not forbidden, check if in line
    lines = []

    # corrected by Alojzij Blatnik #48
    # test first character
    if line[0] in ["&", "|", ";"]:
        start = 1
    else:
        start = 0

    # split remaining command line
    for i in range(1, len(line)):
        # in case \& or \| or \; don't split it
        if line[i] in ["&", "|", ";"] and line[i - 1] != "\\":
            # if there is more && or || skip it
            if start != i:
                lines.append(line[start:i])
            start = i + 1

    # append remaining command line
    if start != len(line):
        lines.append(line[start:len(line)])

    # remove trailing parenthesis
    line = re.sub(r'\)$', '', line)
    for separate_line in lines:
        separate_line = " ".join(separate_line.split())
        splitcmd = separate_line.strip().split(' ')
        command = splitcmd[0]
        if len(splitcmd) > 1:
            cmdargs = splitcmd
        else:
            cmdargs = None

        # in case of a sudo command, check in sudo_commands list if allowed
        if command == 'sudo':
            if type(cmdargs) == list:
                # allow the -u (user) flag
                if cmdargs[1] == '-u' and cmdargs:
                    sudocmd = cmdargs[3]
                else:
                    sudocmd = cmdargs[1]
                if sudocmd not in conf['sudo_commands'] and cmdargs:
                    ret, conf = warn_count('sudo command',
                                           oline,
                                           conf,
                                           strict=strict,
                                           ssh=ssh)
                    return ret, conf

        # if over SSH, replace the allowed list with the overssh one
        if ssh:
            conf['allowed'] = conf['overssh']

        # for all other commands check in allowed list
        if command not in conf['allowed'] and command:
            ret, conf = warn_count('command',
                                   command,
                                   conf,
                                   strict=strict,
                                   ssh=ssh)
            return ret, conf
    return 0, conf
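

# Illustrative driver (an editor's sketch, not part of lshell): builds a
# minimal conf mapping with just the keys the checks above consume and runs
# one line through check_secure(). Every value here is an assumption for
# demonstration; real lshell derives conf from its configuration files.
import logging


def _demo_check_secure(line='ls -l'):
    conf = {'logpath': logging.getLogger('lshell.demo'),
            'warning_counter': 2,         # warnings left before kick-out
            'quiet': False,
            'forbidden': [';', '&', '|'],
            'allowed': ['ls', 'echo'],    # commands the user may run
            'sudo_commands': [],
            'overssh': ['scp'],
            # allowed-path regex, then denied-path regex (check_path drops
            # the denied pattern's last character, hence the trailing '|')
            'path': ['^/home/demo|^/tmp', '^/etc|'],
            'home_path': '/home/demo'}
    ret, conf = check_secure(line, conf, strict=None)
    return ret                            # 0 == accepted, 1 == refused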
./CrossVul/dataset_final_sorted/CWE-264/py/good_5236_0
crossvul-python_data_good_3772_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
/images endpoint for Glance v1 API
"""

import sys
import traceback

import eventlet
from webob.exc import (HTTPError,
                       HTTPNotFound,
                       HTTPConflict,
                       HTTPBadRequest,
                       HTTPForbidden,
                       HTTPRequestEntityTooLarge,
                       HTTPServiceUnavailable,
                       )

from glance.api import common
from glance.api import policy
import glance.api.v1
from glance import context
from glance.api.v1 import controller
from glance.api.v1 import filters
from glance.common import exception
from glance.common import utils
from glance.common import wsgi
from glance import notifier
from glance.openstack.common import cfg
import glance.openstack.common.log as logging
from glance import registry
from glance.store import (create_stores,
                          get_from_backend,
                          get_size_from_backend,
                          safe_delete_from_backend,
                          schedule_delayed_delete_from_backend,
                          get_store_from_location,
                          get_store_from_scheme)

LOG = logging.getLogger(__name__)
SUPPORTED_PARAMS = glance.api.v1.SUPPORTED_PARAMS
SUPPORTED_FILTERS = glance.api.v1.SUPPORTED_FILTERS

CONTAINER_FORMATS = ['ami', 'ari', 'aki', 'bare', 'ovf']
DISK_FORMATS = ['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi',
                'iso']

# Defined at module level due to _is_opt_registered
# identity check (not equality).
default_store_opt = cfg.StrOpt('default_store', default='file')

CONF = cfg.CONF
CONF.register_opt(default_store_opt)


def validate_image_meta(req, values):

    name = values.get('name')
    disk_format = values.get('disk_format')
    container_format = values.get('container_format')

    if 'disk_format' in values:
        if not disk_format in DISK_FORMATS:
            msg = "Invalid disk format '%s' for image." % disk_format
            raise HTTPBadRequest(explanation=msg, request=req)

    if 'container_format' in values:
        if not container_format in CONTAINER_FORMATS:
            msg = "Invalid container format '%s' for image." % container_format
            raise HTTPBadRequest(explanation=msg, request=req)

    if name and len(name) > 255:
        msg = _('Image name too long: %d') % len(name)
        raise HTTPBadRequest(explanation=msg, request=req)

    amazon_formats = ('aki', 'ari', 'ami')

    if disk_format in amazon_formats or container_format in amazon_formats:
        if disk_format is None:
            values['disk_format'] = container_format
        elif container_format is None:
            values['container_format'] = disk_format
        elif container_format != disk_format:
            msg = ("Invalid mix of disk and container formats. "
                   "When setting a disk or container format to "
                   "one of 'aki', 'ari', or 'ami', the container "
                   "and disk formats must match.")
            raise HTTPBadRequest(explanation=msg, request=req)

    return values


class Controller(controller.BaseController):
    """
    WSGI controller for images resource in Glance v1 API

    The images resource API is a RESTful web service for image data. The API
    is as follows::

        GET /images -- Returns a set of brief metadata about images
        GET /images/detail -- Returns a set of detailed metadata about
                              images
        HEAD /images/<ID> -- Return metadata about an image with id <ID>
        GET /images/<ID> -- Return image data for image with id <ID>
        POST /images -- Store image data and return metadata about the
                        newly-stored image
        PUT /images/<ID> -- Update image metadata and/or upload image
                            data for a previously-reserved image
        DELETE /images/<ID> -- Delete the image with id <ID>
    """

    def __init__(self):
        create_stores()
        self.verify_scheme_or_exit(CONF.default_store)
        self.notifier = notifier.Notifier()
        registry.configure_registry_client()
        self.policy = policy.Enforcer()
        self.pool = eventlet.GreenPool(size=1024)

    def _enforce(self, req, action):
        """Authorize an action against our policies"""
        try:
            self.policy.enforce(req.context, action, {})
        except exception.Forbidden:
            raise HTTPForbidden()

    def index(self, req):
        """
        Returns the following information for all public, available images:

            * id -- The opaque image identifier
            * name -- The name of the image
            * disk_format -- The disk image format
            * container_format -- The "container" format of the image
            * checksum -- MD5 checksum of the image data
            * size -- Size of image data in bytes

        :param req: The WSGI/Webob Request object
        :retval The response body is a mapping of the following form::

            {'images': [
                {'id': <ID>,
                 'name': <NAME>,
                 'disk_format': <DISK_FORMAT>,
                 'container_format': <DISK_FORMAT>,
                 'checksum': <CHECKSUM>,
                 'size': <SIZE>}, ...
            ]}
        """
        self._enforce(req, 'get_images')
        params = self._get_query_params(req)
        try:
            images = registry.get_images_list(req.context, **params)
        except exception.Invalid, e:
            raise HTTPBadRequest(explanation="%s" % e)

        return dict(images=images)

    def detail(self, req):
        """
        Returns detailed information for all public, available images

        :param req: The WSGI/Webob Request object
        :retval The response body is a mapping of the following form::

            {'images': [
                {'id': <ID>,
                 'name': <NAME>,
                 'size': <SIZE>,
                 'disk_format': <DISK_FORMAT>,
                 'container_format': <CONTAINER_FORMAT>,
                 'checksum': <CHECKSUM>,
                 'min_disk': <MIN_DISK>,
                 'min_ram': <MIN_RAM>,
                 'store': <STORE>,
                 'status': <STATUS>,
                 'created_at': <TIMESTAMP>,
                 'updated_at': <TIMESTAMP>,
                 'deleted_at': <TIMESTAMP>|<NONE>,
                 'properties': {'distro': 'Ubuntu 10.04 LTS', ...}}, ...
            ]}
        """
        self._enforce(req, 'get_images')
        params = self._get_query_params(req)
        try:
            images = registry.get_images_detail(req.context, **params)
            # Strip out the Location attribute. Temporary fix for
            # LP Bug #755916. This information is still coming back
            # from the registry, since the API server still needs access
            # to it, however we do not return this potential security
            # information to the API end user...
            for image in images:
                del image['location']
        except exception.Invalid, e:
            raise HTTPBadRequest(explanation="%s" % e)
        return dict(images=images)

    def _get_query_params(self, req):
        """
        Extracts necessary query params from request.

        :param req: the WSGI Request object
        :retval dict of parameters that can be used by registry client
        """
        params = {'filters': self._get_filters(req)}

        for PARAM in SUPPORTED_PARAMS:
            if PARAM in req.params:
                params[PARAM] = req.params.get(PARAM)
        return params

    def _get_filters(self, req):
        """
        Return a dictionary of query param filters from the request

        :param req: the Request object coming from the wsgi layer
        :retval a dict of key/value filters
        """
        query_filters = {}
        for param in req.params:
            if param in SUPPORTED_FILTERS or param.startswith('property-'):
                query_filters[param] = req.params.get(param)
                if not filters.validate(param, query_filters[param]):
                    raise HTTPBadRequest('Bad value passed to filter %s '
                                         'got %s' % (param,
                                                     query_filters[param]))
        return query_filters

    def meta(self, req, id):
        """
        Returns metadata about an image in the HTTP headers of the
        response object

        :param req: The WSGI/Webob Request object
        :param id: The opaque image identifier
        :retval similar to 'show' method but without image_data

        :raises HTTPNotFound if image metadata is not available to user
        """
        self._enforce(req, 'get_image')
        image_meta = self.get_image_meta_or_404(req, id)
        del image_meta['location']
        return {
            'image_meta': image_meta
        }

    @staticmethod
    def _validate_source(source, req):
        """
        External sources (as specified via the location or copy-from headers)
        are supported only over non-local store types, i.e. S3, Swift, HTTP.
        Note the absence of file:// for security reasons, see LP bug #942118.
        If the above constraint is violated, we reject with 400 "Bad Request".
        """
        if source:
            for scheme in ['s3', 'swift', 'http']:
                if source.lower().startswith(scheme):
                    return source
            msg = _("External sourcing not supported for store %s") % source
            LOG.error(msg)
            raise HTTPBadRequest(explanation=msg,
                                 request=req,
                                 content_type="text/plain")

    @staticmethod
    def _copy_from(req):
        return req.headers.get('x-glance-api-copy-from')

    @staticmethod
    def _external_source(image_meta, req):
        source = image_meta.get('location', Controller._copy_from(req))
        return Controller._validate_source(source, req)

    @staticmethod
    def _get_from_store(context, where):
        try:
            image_data, image_size = get_from_backend(context, where)
        except exception.NotFound, e:
            raise HTTPNotFound(explanation="%s" % e)
        image_size = int(image_size) if image_size else None
        return image_data, image_size

    def show(self, req, id):
        """
        Returns an iterator that can be used to retrieve an image's
        data along with the image metadata.

        :param req: The WSGI/Webob Request object
        :param id: The opaque image identifier

        :raises HTTPNotFound if image is not available to user
        """
        self._enforce(req, 'get_image')
        self._enforce(req, 'download_image')
        image_meta = self.get_active_image_meta_or_404(req, id)

        if image_meta.get('size') == 0:
            image_iterator = iter([])
        else:
            image_iterator, size = self._get_from_store(
                req.context, image_meta['location'])
            image_iterator = utils.cooperative_iter(image_iterator)
            image_meta['size'] = size or image_meta['size']

        del image_meta['location']
        return {
            'image_iterator': image_iterator,
            'image_meta': image_meta,
        }

    def _reserve(self, req, image_meta):
        """
        Adds the image metadata to the registry and assigns
        an image identifier if one is not supplied in the request
        headers. Sets the image's status to `queued`.

        :param req: The WSGI/Webob Request object
        :param id: The opaque image identifier
        :param image_meta: The image metadata

        :raises HTTPConflict if image already exists
        :raises HTTPBadRequest if image metadata is not valid
        """
        location = self._external_source(image_meta, req)

        image_meta['status'] = ('active' if image_meta.get('size') == 0
                                else 'queued')

        if location:
            store = get_store_from_location(location)
            # check the store exists before we hit the registry, but we
            # don't actually care what it is at this point
            self.get_store_or_400(req, store)

            # retrieve the image size from remote store (if not provided)
            image_meta['size'] = self._get_size(req.context, image_meta,
                                                location)
        else:
            # Ensure that the size attribute is set to zero for directly
            # uploadable images (if not provided). The size will be set
            # to a non-zero value during upload
            image_meta['size'] = image_meta.get('size', 0)

        try:
            image_meta = registry.add_image_metadata(req.context, image_meta)
            return image_meta
        except exception.Duplicate:
            msg = (_("An image with identifier %s already exists")
                   % image_meta['id'])
            LOG.error(msg)
            raise HTTPConflict(explanation=msg,
                               request=req,
                               content_type="text/plain")
        except exception.Invalid, e:
            msg = (_("Failed to reserve image. Got error: %(e)s") % locals())
            for line in msg.split('\n'):
                LOG.error(line)
            raise HTTPBadRequest(explanation=msg,
                                 request=req,
                                 content_type="text/plain")
        except exception.Forbidden:
            msg = _("Forbidden to reserve image.")
            LOG.error(msg)
            raise HTTPForbidden(explanation=msg,
                                request=req,
                                content_type="text/plain")

    def _upload(self, req, image_meta):
        """
        Uploads the payload of the request to a backend store in
        Glance. If the `x-image-meta-store` header is set, Glance
        will attempt to use that scheme; if not, Glance will use the
        scheme set by the flag `default_store` to find the backing store.

        :param req: The WSGI/Webob Request object
        :param image_meta: Mapping of metadata about image

        :raises HTTPConflict if image already exists
        :retval The location where the image was stored
        """

        copy_from = self._copy_from(req)
        if copy_from:
            try:
                image_data, image_size = self._get_from_store(req.context,
                                                              copy_from)
            except Exception as e:
                self._safe_kill(req, image_meta['id'])
                msg = _("Copy from external source failed: %s") % e
                LOG.error(msg)
                return
            image_meta['size'] = image_size or image_meta['size']
        else:
            try:
                req.get_content_type('application/octet-stream')
            except exception.InvalidContentType:
                self._safe_kill(req, image_meta['id'])
                msg = _("Content-Type must be application/octet-stream")
                LOG.error(msg)
                raise HTTPBadRequest(explanation=msg)

            image_data = req.body_file

        scheme = req.headers.get('x-image-meta-store', CONF.default_store)

        store = self.get_store_or_400(req, scheme)

        image_id = image_meta['id']
        LOG.debug(_("Setting image %s to status 'saving'"), image_id)
        registry.update_image_metadata(req.context, image_id,
                                       {'status': 'saving'})

        LOG.debug(_("Uploading image data for image %(image_id)s "
                    "to %(scheme)s store"), locals())

        try:
            location, size, checksum = store.add(
                image_meta['id'],
                utils.CooperativeReader(image_data),
                image_meta['size'])

            # Verify any supplied checksum value matches checksum
            # returned from store when adding image
            supplied_checksum = image_meta.get('checksum')
            if supplied_checksum and supplied_checksum != checksum:
                msg = _("Supplied checksum (%(supplied_checksum)s) and "
                        "checksum generated from uploaded image "
                        "(%(checksum)s) did not match. Setting image "
                        "status to 'killed'.") % locals()
                LOG.error(msg)
                self._safe_kill(req, image_id)
                raise HTTPBadRequest(explanation=msg,
                                     content_type="text/plain",
                                     request=req)

            # Update the database with the checksum returned
            # from the backend store
            LOG.debug(_("Updating image %(image_id)s data. "
                        "Checksum set to %(checksum)s, size set "
                        "to %(size)d"), locals())
            update_data = {'checksum': checksum,
                           'size': size}
            image_meta = registry.update_image_metadata(req.context,
                                                        image_id,
                                                        update_data)
            self.notifier.info('image.upload', image_meta)

            return location

        except exception.Duplicate, e:
            msg = _("Attempt to upload duplicate image: %s") % e
            LOG.error(msg)
            self._safe_kill(req, image_id)
            self.notifier.error('image.upload', msg)
            raise HTTPConflict(explanation=msg, request=req)

        except exception.Forbidden, e:
            msg = _("Forbidden upload attempt: %s") % e
            LOG.error(msg)
            self._safe_kill(req, image_id)
            self.notifier.error('image.upload', msg)
            raise HTTPForbidden(explanation=msg,
                                request=req,
                                content_type="text/plain")

        except exception.StorageFull, e:
            msg = _("Image storage media is full: %s") % e
            LOG.error(msg)
            self._safe_kill(req, image_id)
            self.notifier.error('image.upload', msg)
            raise HTTPRequestEntityTooLarge(explanation=msg,
                                            request=req,
                                            content_type='text/plain')

        except exception.StorageWriteDenied, e:
            msg = _("Insufficient permissions on image storage media: %s") % e
            LOG.error(msg)
            self._safe_kill(req, image_id)
            self.notifier.error('image.upload', msg)
            raise HTTPServiceUnavailable(explanation=msg,
                                         request=req,
                                         content_type='text/plain')

        except exception.ImageSizeLimitExceeded, e:
            msg = _("Denying attempt to upload image larger than %d bytes.")
            self._safe_kill(req, image_id)
            raise HTTPBadRequest(explanation=msg % CONF.image_size_cap,
                                 request=req,
                                 content_type='text/plain')

        except HTTPError, e:
            self._safe_kill(req, image_id)
            self.notifier.error('image.upload', e.explanation)
            #NOTE(bcwaldon): Ideally, we would just call 'raise' here,
            # but something in the above function calls is affecting the
            # exception context and we must explicitly re-raise the
            # caught exception.
            raise e

        except Exception, e:
            tb_info = traceback.format_exc()
            LOG.error(tb_info)

            self._safe_kill(req, image_id)

            msg = _("Error uploading image: (%(class_name)s): "
                    "%(exc)s") % ({'class_name': e.__class__.__name__,
                                   'exc': str(e)})

            self.notifier.error('image.upload', msg)
            raise HTTPBadRequest(explanation=msg, request=req)

    def _activate(self, req, image_id, location):
        """
        Sets the image status to `active` and the image's location
        attribute.

        :param req: The WSGI/Webob Request object
        :param image_id: Opaque image identifier
        :param location: Location of where Glance stored this image
        """
        image_meta = {}
        image_meta['location'] = location
        image_meta['status'] = 'active'

        try:
            image_meta_data = registry.update_image_metadata(req.context,
                                                             image_id,
                                                             image_meta)
            self.notifier.info("image.update", image_meta_data)
            return image_meta_data
        except exception.Invalid, e:
            msg = (_("Failed to activate image. Got error: %(e)s")
                   % locals())
            for line in msg.split('\n'):
                LOG.error(line)
            self.notifier.error('image.update', msg)
            raise HTTPBadRequest(explanation=msg,
                                 request=req,
                                 content_type="text/plain")

    def _kill(self, req, image_id):
        """
        Marks the image status to `killed`.

        :param req: The WSGI/Webob Request object
        :param image_id: Opaque image identifier
        """
        registry.update_image_metadata(req.context, image_id,
                                       {'status': 'killed'})

    def _safe_kill(self, req, image_id):
        """
        Mark image killed without raising exceptions if it fails.

        Since _kill is meant to be called from exception handlers, it should
        not raise itself, rather it should just log its error.

        :param req: The WSGI/Webob Request object
        :param image_id: Opaque image identifier
        """
        try:
            self._kill(req, image_id)
        except Exception, e:
            LOG.error(_("Unable to kill image %(id)s: "
                        "%(exc)s") % ({'id': image_id,
                                       'exc': repr(e)}))

    def _upload_and_activate(self, req, image_meta):
        """
        Safely uploads the image data in the request payload
        and activates the image in the registry after a successful
        upload.

        :param req: The WSGI/Webob Request object
        :param image_meta: Mapping of metadata about image

        :retval Mapping of updated image data
        """
        image_id = image_meta['id']
        # This is necessary because of a bug in Webob 1.0.2 - 1.0.7
        # See: https://bitbucket.org/ianb/webob/
        # issue/12/fix-for-issue-6-broke-chunked-transfer
        req.is_body_readable = True
        location = self._upload(req, image_meta)
        return self._activate(req, image_id, location) if location else None

    def _get_size(self, context, image_meta, location):
        # retrieve the image size from remote store (if not provided)
        return image_meta.get('size', 0) or get_size_from_backend(context,
                                                                  location)

    def _handle_source(self, req, image_id, image_meta, image_data):
        if image_data:
            image_meta = self._validate_image_for_activation(req,
                                                             image_id,
                                                             image_meta)
            image_meta = self._upload_and_activate(req, image_meta)
        elif self._copy_from(req):
            msg = _('Triggering asynchronous copy from external source')
            LOG.info(msg)
            self.pool.spawn_n(self._upload_and_activate, req, image_meta)
        else:
            location = image_meta.get('location')
            if location:
                self._validate_image_for_activation(req, image_id, image_meta)
                image_meta = self._activate(req, image_id, location)
        return image_meta

    def _validate_image_for_activation(self, req, id, values):
        """Ensures that all required image metadata values are valid."""
        image = self.get_image_meta_or_404(req, id)
        if not 'disk_format' in values:
            values['disk_format'] = image['disk_format']
        if not 'container_format' in values:
            values['container_format'] = image['container_format']
        if not 'name' in values:
            values['name'] = image['name']

        values = validate_image_meta(req, values)
        return values

    @utils.mutating
    def create(self, req, image_meta, image_data):
        """
        Adds a new image to Glance. Four scenarios exist when creating an
        image:

        1. If the image data is available directly for upload, create can be
           passed the image data as the request body and the metadata as the
           request headers. The image will initially be 'queued', during
           upload it will be in the 'saving' status, and then 'killed' or
           'active' depending on whether the upload completed successfully.

        2. If the image data exists somewhere else, you can upload indirectly
           from the external source using the x-glance-api-copy-from header.
           Once the image is uploaded, the external store is not subsequently
           consulted, i.e. the image content is served out from the configured
           glance image store. State transitions are as for option #1.

        3. If the image data exists somewhere else, you can reference the
           source using the x-image-meta-location header. The image content
           will be served out from the external store, i.e. is never uploaded
           to the configured glance image store.

        4. If the image data is not available yet, but you'd like to reserve a
           spot for it, you can omit the data and a record will be created in
           the 'queued' state. This exists primarily to maintain backwards
           compatibility with OpenStack/Rackspace API semantics.

        The request body *must* be encoded as application/octet-stream,
        otherwise an HTTPBadRequest is returned.

        Upon a successful save of the image data and metadata, a response
        containing metadata about the image is returned, including its
        opaque identifier.

        :param req: The WSGI/Webob Request object
        :param image_meta: Mapping of metadata about image
        :param image_data: Actual image data that is to be stored

        :raises HTTPBadRequest if x-image-meta-location is missing
                and the request body is not application/octet-stream
                image data.
        """
        self._enforce(req, 'add_image')
        is_public = image_meta.get('is_public')
        if is_public:
            self._enforce(req, 'publicize_image')

        image_meta = self._reserve(req, image_meta)
        id = image_meta['id']

        image_meta = self._handle_source(req, id, image_meta, image_data)

        location_uri = image_meta.get('location')
        if location_uri:
            self.update_store_acls(req, id, location_uri, public=is_public)

        # Prevent client from learning the location, as it
        # could contain security credentials
        image_meta.pop('location', None)

        return {'image_meta': image_meta}

    @utils.mutating
    def update(self, req, id, image_meta, image_data):
        """
        Updates an existing image with the registry.

        :param request: The WSGI/Webob Request object
        :param id: The opaque image identifier

        :retval Returns the updated image information as a mapping
        """
        self._enforce(req, 'modify_image')
        is_public = image_meta.get('is_public')
        if is_public:
            self._enforce(req, 'publicize_image')
        orig_image_meta = self.get_image_meta_or_404(req, id)
        orig_status = orig_image_meta['status']

        # The default behaviour for a PUT /images/<IMAGE_ID> is to
        # override any properties that were previously set. This, however,
        # leads to a number of issues for the common use case where a caller
        # registers an image with some properties and then almost immediately
        # uploads an image file along with some more properties. Here, we
        # check for a special header value to be false in order to force
        # properties NOT to be purged. However we also disable purging of
        # properties if an image file is being uploaded...
        purge_props = req.headers.get('x-glance-registry-purge-props', True)
        purge_props = (utils.bool_from_string(purge_props) and
                       image_data is None)

        if image_data is not None and orig_status != 'queued':
            raise HTTPConflict(_("Cannot upload to an unqueued image"))

        # Only allow the Location|Copy-From fields to be modified if the
        # image is in queued status, which indicates that the user called
        # POST /images but originally supplied neither a Location|Copy-From
        # field NOR image data
        location = self._external_source(image_meta, req)
        reactivating = orig_status != 'queued' and location
        activating = orig_status == 'queued' and (location or image_data)

        # Make image public in the backend store (if implemented)
        orig_or_updated_loc = location or orig_image_meta.get('location',
                                                              None)
        if orig_or_updated_loc:
            self.update_store_acls(req, id, orig_or_updated_loc,
                                   public=is_public)

        if reactivating:
            msg = _("Attempted to update Location field for an image "
                    "not in queued status.")
            raise HTTPBadRequest(explanation=msg,
                                 request=req,
                                 content_type="text/plain")

        try:
            if location:
                image_meta['size'] = self._get_size(req.context, image_meta,
                                                    location)

            image_meta = registry.update_image_metadata(req.context,
                                                        id,
                                                        image_meta,
                                                        purge_props)

            if activating:
                image_meta = self._handle_source(req, id, image_meta,
                                                 image_data)

        except exception.Invalid, e:
            msg = (_("Failed to update image metadata. Got error: %(e)s")
                   % locals())
            for line in msg.split('\n'):
                LOG.error(line)
            self.notifier.error('image.update', msg)
            raise HTTPBadRequest(explanation=msg,
                                 request=req,
                                 content_type="text/plain")
        except exception.NotFound, e:
            msg = ("Failed to find image to update: %(e)s" % locals())
            for line in msg.split('\n'):
                LOG.info(line)
            self.notifier.info('image.update', msg)
            raise HTTPNotFound(explanation=msg,
                               request=req,
                               content_type="text/plain")
        except exception.Forbidden, e:
            msg = ("Forbidden to update image: %(e)s" % locals())
            for line in msg.split('\n'):
                LOG.info(line)
            self.notifier.info('image.update', msg)
            raise HTTPForbidden(explanation=msg,
                                request=req,
                                content_type="text/plain")
        else:
            self.notifier.info('image.update', image_meta)

        # Prevent client from learning the location, as it
        # could contain security credentials
        image_meta.pop('location', None)

        return {'image_meta': image_meta}

    @utils.mutating
    def delete(self, req, id):
        """
        Deletes the image and all its chunks from Glance

        :param req: The WSGI/Webob Request object
        :param id: The opaque image identifier

        :raises HttpBadRequest if image registry is invalid
        :raises HttpNotFound if image or any chunk is not available
        :raises HttpUnauthorized if image or any chunk is not
                deleteable by the requesting user
        """
        self._enforce(req, 'delete_image')

        image = self.get_image_meta_or_404(req, id)
        if image['protected']:
            msg = _("Image is protected")
            LOG.debug(msg)
            raise HTTPForbidden(explanation=msg,
                                request=req,
                                content_type="text/plain")

        if image['location'] and CONF.delayed_delete:
            status = 'pending_delete'
        else:
            status = 'deleted'

        try:
            # Delete the image from the registry first, since we rely on it
            # for authorization checks.
            # See https://bugs.launchpad.net/glance/+bug/1065187
            registry.update_image_metadata(req.context, id,
                                           {'status': status})
            registry.delete_image_metadata(req.context, id)

            # The image's location field may be None in the case
            # of a saving or queued image, therefore don't ask a backend
            # to delete the image if the backend doesn't yet store it.
            # See https://bugs.launchpad.net/glance/+bug/747799
            if image['location']:
                if CONF.delayed_delete:
                    schedule_delayed_delete_from_backend(image['location'],
                                                         id)
                else:
                    safe_delete_from_backend(image['location'],
                                             req.context, id)
        except exception.NotFound, e:
            msg = ("Failed to find image to delete: %(e)s" % locals())
            for line in msg.split('\n'):
                LOG.info(line)
            self.notifier.info('image.delete', msg)
            raise HTTPNotFound(explanation=msg,
                               request=req,
                               content_type="text/plain")
        except exception.Forbidden, e:
            msg = ("Forbidden to delete image: %(e)s" % locals())
            for line in msg.split('\n'):
                LOG.info(line)
            self.notifier.info('image.delete', msg)
            raise HTTPForbidden(explanation=msg,
                                request=req,
                                content_type="text/plain")
        else:
            self.notifier.info('image.delete', image)

    def get_store_or_400(self, request, scheme):
        """
        Grabs the storage backend for the supplied store name
        or raises an HTTPBadRequest (400) response

        :param request: The WSGI/Webob Request object
        :param scheme: The backend store scheme

        :raises HTTPBadRequest if store does not exist
        """
        try:
            return get_store_from_scheme(request.context, scheme)
        except exception.UnknownScheme:
            msg = _("Store for scheme %s not found")
            LOG.error(msg % scheme)
            raise HTTPBadRequest(explanation=msg,
                                 request=request,
                                 content_type='text/plain')

    def verify_scheme_or_exit(self, scheme):
        """
        Verifies availability of the storage backend for the
        given scheme or exits

        :param scheme: The backend store scheme
        """
        try:
            get_store_from_scheme(context.RequestContext(), scheme)
        except exception.UnknownScheme:
            msg = _("Store for scheme %s not found")
            LOG.error(msg % scheme)
            # message on stderr will only be visible if started directly via
            # bin/glance-api, as opposed to being daemonized by glance-control
            sys.stderr.write(msg % scheme)
            sys.exit(255)


class ImageDeserializer(wsgi.JSONRequestDeserializer):
    """Handles deserialization of specific controller method requests."""

    def _deserialize(self, request):
        result = {}
        try:
            result['image_meta'] = utils.get_image_meta_from_headers(request)
        except exception.Invalid:
            image_size_str = request.headers['x-image-meta-size']
            msg = _("Incoming image size of %s was not convertible to "
                    "an integer.") % image_size_str
            raise HTTPBadRequest(explanation=msg, request=request)

        image_meta = result['image_meta']
        image_meta = validate_image_meta(request, image_meta)

        if request.content_length:
            image_size = request.content_length
        elif 'size' in image_meta:
            image_size = image_meta['size']
        else:
            image_size = None

        data = request.body_file if self.has_body(request) else None

        if image_size is None and data is not None:
            data = utils.LimitingReader(data, CONF.image_size_cap)

            #NOTE(bcwaldon): this is a hack to make sure the downstream code
            # gets the correct image data
            request.body_file = data

        elif image_size > CONF.image_size_cap:
            max_image_size = CONF.image_size_cap
            msg = _("Denying attempt to upload image larger than %d bytes.")
            LOG.warn(msg % max_image_size)
            raise HTTPBadRequest(explanation=msg % max_image_size,
                                 request=request)

        result['image_data'] = data
        return result

    def create(self, request):
        return self._deserialize(request)

    def update(self, request):
        return self._deserialize(request)


class ImageSerializer(wsgi.JSONResponseSerializer):
    """Handles serialization of specific controller method responses."""

    def __init__(self):
        self.notifier = notifier.Notifier()

    def _inject_location_header(self, response, image_meta):
        location = self._get_image_location(image_meta)
        response.headers['Location'] = location.encode('utf-8')

    def _inject_checksum_header(self,
response, image_meta): if image_meta['checksum'] is not None: response.headers['ETag'] = image_meta['checksum'].encode('utf-8') def _inject_image_meta_headers(self, response, image_meta): """ Given a response and mapping of image metadata, injects the Response with a set of HTTP headers for the image metadata. Each main image metadata field is injected as a HTTP header with key 'x-image-meta-<FIELD>' except for the properties field, which is further broken out into a set of 'x-image-meta-property-<KEY>' headers :param response: The Webob Response object :param image_meta: Mapping of image metadata """ headers = utils.image_meta_to_http_headers(image_meta) for k, v in headers.items(): response.headers[k.encode('utf-8')] = v.encode('utf-8') def _get_image_location(self, image_meta): """Build a relative url to reach the image defined by image_meta.""" return "/v1/images/%s" % image_meta['id'] def meta(self, response, result): image_meta = result['image_meta'] self._inject_image_meta_headers(response, image_meta) self._inject_location_header(response, image_meta) self._inject_checksum_header(response, image_meta) return response def show(self, response, result): image_meta = result['image_meta'] image_id = image_meta['id'] image_iter = result['image_iterator'] # image_meta['size'] should be an int, but could possibly be a str expected_size = int(image_meta['size']) response.app_iter = common.size_checked_iter( response, image_meta, expected_size, image_iter, self.notifier) # Using app_iter blanks content-length, so we set it here... response.headers['Content-Length'] = str(image_meta['size']) response.headers['Content-Type'] = 'application/octet-stream' self._inject_image_meta_headers(response, image_meta) self._inject_location_header(response, image_meta) self._inject_checksum_header(response, image_meta) return response def update(self, response, result): image_meta = result['image_meta'] response.body = self.to_json(dict(image=image_meta)) response.headers['Content-Type'] = 'application/json' self._inject_location_header(response, image_meta) self._inject_checksum_header(response, image_meta) return response def create(self, response, result): image_meta = result['image_meta'] response.status = 201 response.headers['Content-Type'] = 'application/json' response.body = self.to_json(dict(image=image_meta)) self._inject_location_header(response, image_meta) self._inject_checksum_header(response, image_meta) return response def create_resource(): """Images resource factory method""" deserializer = ImageDeserializer() serializer = ImageSerializer() return wsgi.Resource(Controller(), deserializer, serializer)
./CrossVul/dataset_final_sorted/CWE-264/py/good_3772_0
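One detail of good_3772_0 worth restating: _upload compares any client-supplied checksum against the checksum the backend store computed while writing, and kills the image on a mismatch rather than activating corrupt data. Below is a toy restatement of that handshake, assuming the MD5 digest that the ETag handling in the serializer implies; the function name is hypothetical, not Glance's:

import hashlib

def checked_store_add(data_chunks, supplied_checksum=None):
    # Compute the checksum while streaming, as a real store.add() would.
    md5 = hashlib.md5()
    size = 0
    for chunk in data_chunks:
        md5.update(chunk)
        size += len(chunk)
    checksum = md5.hexdigest()
    # Mirror of the supplied-vs-computed comparison in _upload: a
    # mismatch is a hard failure (Glance marks the image 'killed').
    if supplied_checksum and supplied_checksum != checksum:
        raise ValueError("supplied checksum %s does not match computed %s"
                         % (supplied_checksum, checksum))
    return size, checksum

size, checksum = checked_store_add([b"image ", b"bytes"])  # no supplied value: accepted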
crossvul-python_data_bad_3772_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ /images endpoint for Glance v1 API """ import sys import traceback import eventlet from webob.exc import (HTTPError, HTTPNotFound, HTTPConflict, HTTPBadRequest, HTTPForbidden, HTTPRequestEntityTooLarge, HTTPServiceUnavailable, ) from glance.api import common from glance.api import policy import glance.api.v1 from glance import context from glance.api.v1 import controller from glance.api.v1 import filters from glance.common import exception from glance.common import utils from glance.common import wsgi from glance import notifier from glance.openstack.common import cfg import glance.openstack.common.log as logging from glance import registry from glance.store import (create_stores, get_from_backend, get_size_from_backend, safe_delete_from_backend, schedule_delayed_delete_from_backend, get_store_from_location, get_store_from_scheme) LOG = logging.getLogger(__name__) SUPPORTED_PARAMS = glance.api.v1.SUPPORTED_PARAMS SUPPORTED_FILTERS = glance.api.v1.SUPPORTED_FILTERS CONTAINER_FORMATS = ['ami', 'ari', 'aki', 'bare', 'ovf'] DISK_FORMATS = ['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso'] # Defined at module level due to _is_opt_registered # identity check (not equality). default_store_opt = cfg.StrOpt('default_store', default='file') CONF = cfg.CONF CONF.register_opt(default_store_opt) def validate_image_meta(req, values): name = values.get('name') disk_format = values.get('disk_format') container_format = values.get('container_format') if 'disk_format' in values: if not disk_format in DISK_FORMATS: msg = "Invalid disk format '%s' for image." % disk_format raise HTTPBadRequest(explanation=msg, request=req) if 'container_format' in values: if not container_format in CONTAINER_FORMATS: msg = "Invalid container format '%s' for image." % container_format raise HTTPBadRequest(explanation=msg, request=req) if name and len(name) > 255: msg = _('Image name too long: %d') % len(name) raise HTTPBadRequest(explanation=msg, request=req) amazon_formats = ('aki', 'ari', 'ami') if disk_format in amazon_formats or container_format in amazon_formats: if disk_format is None: values['disk_format'] = container_format elif container_format is None: values['container_format'] = disk_format elif container_format != disk_format: msg = ("Invalid mix of disk and container formats. " "When setting a disk or container format to " "one of 'aki', 'ari', or 'ami', the container " "and disk formats must match.") raise HTTPBadRequest(explanation=msg, request=req) return values class Controller(controller.BaseController): """ WSGI controller for images resource in Glance v1 API The images resource API is a RESTful web service for image data. 
The API is as follows:: GET /images -- Returns a set of brief metadata about images GET /images/detail -- Returns a set of detailed metadata about images HEAD /images/<ID> -- Return metadata about an image with id <ID> GET /images/<ID> -- Return image data for image with id <ID> POST /images -- Store image data and return metadata about the newly-stored image PUT /images/<ID> -- Update image metadata and/or upload image data for a previously-reserved image DELETE /images/<ID> -- Delete the image with id <ID> """ def __init__(self): create_stores() self.verify_scheme_or_exit(CONF.default_store) self.notifier = notifier.Notifier() registry.configure_registry_client() self.policy = policy.Enforcer() self.pool = eventlet.GreenPool(size=1024) def _enforce(self, req, action): """Authorize an action against our policies""" try: self.policy.enforce(req.context, action, {}) except exception.Forbidden: raise HTTPForbidden() def index(self, req): """ Returns the following information for all public, available images: * id -- The opaque image identifier * name -- The name of the image * disk_format -- The disk image format * container_format -- The "container" format of the image * checksum -- MD5 checksum of the image data * size -- Size of image data in bytes :param req: The WSGI/Webob Request object :retval The response body is a mapping of the following form:: {'images': [ {'id': <ID>, 'name': <NAME>, 'disk_format': <DISK_FORMAT>, 'container_format': <DISK_FORMAT>, 'checksum': <CHECKSUM> 'size': <SIZE>}, ... ]} """ self._enforce(req, 'get_images') params = self._get_query_params(req) try: images = registry.get_images_list(req.context, **params) except exception.Invalid, e: raise HTTPBadRequest(explanation="%s" % e) return dict(images=images) def detail(self, req): """ Returns detailed information for all public, available images :param req: The WSGI/Webob Request object :retval The response body is a mapping of the following form:: {'images': [ {'id': <ID>, 'name': <NAME>, 'size': <SIZE>, 'disk_format': <DISK_FORMAT>, 'container_format': <CONTAINER_FORMAT>, 'checksum': <CHECKSUM>, 'min_disk': <MIN_DISK>, 'min_ram': <MIN_RAM>, 'store': <STORE>, 'status': <STATUS>, 'created_at': <TIMESTAMP>, 'updated_at': <TIMESTAMP>, 'deleted_at': <TIMESTAMP>|<NONE>, 'properties': {'distro': 'Ubuntu 10.04 LTS', ...}}, ... ]} """ self._enforce(req, 'get_images') params = self._get_query_params(req) try: images = registry.get_images_detail(req.context, **params) # Strip out the Location attribute. Temporary fix for # LP Bug #755916. This information is still coming back # from the registry, since the API server still needs access # to it, however we do not return this potential security # information to the API end user... for image in images: del image['location'] except exception.Invalid, e: raise HTTPBadRequest(explanation="%s" % e) return dict(images=images) def _get_query_params(self, req): """ Extracts necessary query params from request. 
:param req: the WSGI Request object :retval dict of parameters that can be used by registry client """ params = {'filters': self._get_filters(req)} for PARAM in SUPPORTED_PARAMS: if PARAM in req.params: params[PARAM] = req.params.get(PARAM) return params def _get_filters(self, req): """ Return a dictionary of query param filters from the request :param req: the Request object coming from the wsgi layer :retval a dict of key/value filters """ query_filters = {} for param in req.params: if param in SUPPORTED_FILTERS or param.startswith('property-'): query_filters[param] = req.params.get(param) if not filters.validate(param, query_filters[param]): raise HTTPBadRequest('Bad value passed to filter %s ' 'got %s' % (param, query_filters[param])) return query_filters def meta(self, req, id): """ Returns metadata about an image in the HTTP headers of the response object :param req: The WSGI/Webob Request object :param id: The opaque image identifier :retval similar to 'show' method but without image_data :raises HTTPNotFound if image metadata is not available to user """ self._enforce(req, 'get_image') image_meta = self.get_image_meta_or_404(req, id) del image_meta['location'] return { 'image_meta': image_meta } @staticmethod def _validate_source(source, req): """ External sources (as specified via the location or copy-from headers) are supported only over non-local store types, i.e. S3, Swift, HTTP. Note the absence of file:// for security reasons, see LP bug #942118. If the above constraint is violated, we reject with 400 "Bad Request". """ if source: for scheme in ['s3', 'swift', 'http']: if source.lower().startswith(scheme): return source msg = _("External sourcing not supported for store %s") % source LOG.error(msg) raise HTTPBadRequest(explanation=msg, request=req, content_type="text/plain") @staticmethod def _copy_from(req): return req.headers.get('x-glance-api-copy-from') @staticmethod def _external_source(image_meta, req): source = image_meta.get('location', Controller._copy_from(req)) return Controller._validate_source(source, req) @staticmethod def _get_from_store(context, where): try: image_data, image_size = get_from_backend(context, where) except exception.NotFound, e: raise HTTPNotFound(explanation="%s" % e) image_size = int(image_size) if image_size else None return image_data, image_size def show(self, req, id): """ Returns an iterator that can be used to retrieve an image's data along with the image metadata. :param req: The WSGI/Webob Request object :param id: The opaque image identifier :raises HTTPNotFound if image is not available to user """ self._enforce(req, 'get_image') self._enforce(req, 'download_image') image_meta = self.get_active_image_meta_or_404(req, id) if image_meta.get('size') == 0: image_iterator = iter([]) else: image_iterator, size = self._get_from_store(req.context, image_meta['location']) image_iterator = utils.cooperative_iter(image_iterator) image_meta['size'] = size or image_meta['size'] del image_meta['location'] return { 'image_iterator': image_iterator, 'image_meta': image_meta, } def _reserve(self, req, image_meta): """ Adds the image metadata to the registry and assigns an image identifier if one is not supplied in the request headers. Sets the image's status to `queued`. 
:param req: The WSGI/Webob Request object :param id: The opaque image identifier :param image_meta: The image metadata :raises HTTPConflict if image already exists :raises HTTPBadRequest if image metadata is not valid """ location = self._external_source(image_meta, req) image_meta['status'] = ('active' if image_meta.get('size') == 0 else 'queued') if location: store = get_store_from_location(location) # check the store exists before we hit the registry, but we # don't actually care what it is at this point self.get_store_or_400(req, store) # retrieve the image size from remote store (if not provided) image_meta['size'] = self._get_size(req.context, image_meta, location) else: # Ensure that the size attribute is set to zero for directly # uploadable images (if not provided). The size will be set # to a non-zero value during upload image_meta['size'] = image_meta.get('size', 0) try: image_meta = registry.add_image_metadata(req.context, image_meta) return image_meta except exception.Duplicate: msg = (_("An image with identifier %s already exists") % image_meta['id']) LOG.error(msg) raise HTTPConflict(explanation=msg, request=req, content_type="text/plain") except exception.Invalid, e: msg = (_("Failed to reserve image. Got error: %(e)s") % locals()) for line in msg.split('\n'): LOG.error(line) raise HTTPBadRequest(explanation=msg, request=req, content_type="text/plain") except exception.Forbidden: msg = _("Forbidden to reserve image.") LOG.error(msg) raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") def _upload(self, req, image_meta): """ Uploads the payload of the request to a backend store in Glance. If the `x-image-meta-store` header is set, Glance will attempt to use that scheme; if not, Glance will use the scheme set by the flag `default_store` to find the backing store. :param req: The WSGI/Webob Request object :param image_meta: Mapping of metadata about image :raises HTTPConflict if image already exists :retval The location where the image was stored """ copy_from = self._copy_from(req) if copy_from: try: image_data, image_size = self._get_from_store(req.context, copy_from) except Exception as e: self._safe_kill(req, image_meta['id']) msg = _("Copy from external source failed: %s") % e LOG.error(msg) return image_meta['size'] = image_size or image_meta['size'] else: try: req.get_content_type('application/octet-stream') except exception.InvalidContentType: self._safe_kill(req, image_meta['id']) msg = _("Content-Type must be application/octet-stream") LOG.error(msg) raise HTTPBadRequest(explanation=msg) image_data = req.body_file scheme = req.headers.get('x-image-meta-store', CONF.default_store) store = self.get_store_or_400(req, scheme) image_id = image_meta['id'] LOG.debug(_("Setting image %s to status 'saving'"), image_id) registry.update_image_metadata(req.context, image_id, {'status': 'saving'}) LOG.debug(_("Uploading image data for image %(image_id)s " "to %(scheme)s store"), locals()) try: location, size, checksum = store.add( image_meta['id'], utils.CooperativeReader(image_data), image_meta['size']) # Verify any supplied checksum value matches checksum # returned from store when adding image supplied_checksum = image_meta.get('checksum') if supplied_checksum and supplied_checksum != checksum: msg = _("Supplied checksum (%(supplied_checksum)s) and " "checksum generated from uploaded image " "(%(checksum)s) did not match. 
Setting image " "status to 'killed'.") % locals() LOG.error(msg) self._safe_kill(req, image_id) raise HTTPBadRequest(explanation=msg, content_type="text/plain", request=req) # Update the database with the checksum returned # from the backend store LOG.debug(_("Updating image %(image_id)s data. " "Checksum set to %(checksum)s, size set " "to %(size)d"), locals()) update_data = {'checksum': checksum, 'size': size} image_meta = registry.update_image_metadata(req.context, image_id, update_data) self.notifier.info('image.upload', image_meta) return location except exception.Duplicate, e: msg = _("Attempt to upload duplicate image: %s") % e LOG.error(msg) self._safe_kill(req, image_id) self.notifier.error('image.upload', msg) raise HTTPConflict(explanation=msg, request=req) except exception.Forbidden, e: msg = _("Forbidden upload attempt: %s") % e LOG.error(msg) self._safe_kill(req, image_id) self.notifier.error('image.upload', msg) raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") except exception.StorageFull, e: msg = _("Image storage media is full: %s") % e LOG.error(msg) self._safe_kill(req, image_id) self.notifier.error('image.upload', msg) raise HTTPRequestEntityTooLarge(explanation=msg, request=req, content_type='text/plain') except exception.StorageWriteDenied, e: msg = _("Insufficient permissions on image storage media: %s") % e LOG.error(msg) self._safe_kill(req, image_id) self.notifier.error('image.upload', msg) raise HTTPServiceUnavailable(explanation=msg, request=req, content_type='text/plain') except exception.ImageSizeLimitExceeded, e: msg = _("Denying attempt to upload image larger than %d bytes.") self._safe_kill(req, image_id) raise HTTPBadRequest(explanation=msg % CONF.image_size_cap, request=req, content_type='text/plain') except HTTPError, e: self._safe_kill(req, image_id) self.notifier.error('image.upload', e.explanation) #NOTE(bcwaldon): Ideally, we would just call 'raise' here, # but something in the above function calls is affecting the # exception context and we must explicitly re-raise the # caught exception. raise e except Exception, e: tb_info = traceback.format_exc() LOG.error(tb_info) self._safe_kill(req, image_id) msg = _("Error uploading image: (%(class_name)s): " "%(exc)s") % ({'class_name': e.__class__.__name__, 'exc': str(e)}) self.notifier.error('image.upload', msg) raise HTTPBadRequest(explanation=msg, request=req) def _activate(self, req, image_id, location): """ Sets the image status to `active` and the image's location attribute. :param req: The WSGI/Webob Request object :param image_id: Opaque image identifier :param location: Location of where Glance stored this image """ image_meta = {} image_meta['location'] = location image_meta['status'] = 'active' try: image_meta_data = registry.update_image_metadata(req.context, image_id, image_meta) self.notifier.info("image.update", image_meta_data) return image_meta_data except exception.Invalid, e: msg = (_("Failed to activate image. Got error: %(e)s") % locals()) for line in msg.split('\n'): LOG.error(line) self.notifier.error('image.update', msg) raise HTTPBadRequest(explanation=msg, request=req, content_type="text/plain") def _kill(self, req, image_id): """ Marks the image status to `killed`. :param req: The WSGI/Webob Request object :param image_id: Opaque image identifier """ registry.update_image_metadata(req.context, image_id, {'status': 'killed'}) def _safe_kill(self, req, image_id): """ Mark image killed without raising exceptions if it fails. 
Since _kill is meant to be called from exceptions handlers, it should not raise itself, rather it should just log its error. :param req: The WSGI/Webob Request object :param image_id: Opaque image identifier """ try: self._kill(req, image_id) except Exception, e: LOG.error(_("Unable to kill image %(id)s: " "%(exc)s") % ({'id': image_id, 'exc': repr(e)})) def _upload_and_activate(self, req, image_meta): """ Safely uploads the image data in the request payload and activates the image in the registry after a successful upload. :param req: The WSGI/Webob Request object :param image_meta: Mapping of metadata about image :retval Mapping of updated image data """ image_id = image_meta['id'] # This is necessary because of a bug in Webob 1.0.2 - 1.0.7 # See: https://bitbucket.org/ianb/webob/ # issue/12/fix-for-issue-6-broke-chunked-transfer req.is_body_readable = True location = self._upload(req, image_meta) return self._activate(req, image_id, location) if location else None def _get_size(self, context, image_meta, location): # retrieve the image size from remote store (if not provided) return image_meta.get('size', 0) or get_size_from_backend(context, location) def _handle_source(self, req, image_id, image_meta, image_data): if image_data: image_meta = self._validate_image_for_activation(req, image_id, image_meta) image_meta = self._upload_and_activate(req, image_meta) elif self._copy_from(req): msg = _('Triggering asynchronous copy from external source') LOG.info(msg) self.pool.spawn_n(self._upload_and_activate, req, image_meta) else: location = image_meta.get('location') if location: self._validate_image_for_activation(req, image_id, image_meta) image_meta = self._activate(req, image_id, location) return image_meta def _validate_image_for_activation(self, req, id, values): """Ensures that all required image metadata values are valid.""" image = self.get_image_meta_or_404(req, id) if not 'disk_format' in values: values['disk_format'] = image['disk_format'] if not 'container_format' in values: values['container_format'] = image['container_format'] if not 'name' in values: values['name'] = image['name'] values = validate_image_meta(req, values) return values @utils.mutating def create(self, req, image_meta, image_data): """ Adds a new image to Glance. Four scenarios exist when creating an image: 1. If the image data is available directly for upload, create can be passed the image data as the request body and the metadata as the request headers. The image will initially be 'queued', during upload it will be in the 'saving' status, and then 'killed' or 'active' depending on whether the upload completed successfully. 2. If the image data exists somewhere else, you can upload indirectly from the external source using the x-glance-api-copy-from header. Once the image is uploaded, the external store is not subsequently consulted, i.e. the image content is served out from the configured glance image store. State transitions are as for option #1. 3. If the image data exists somewhere else, you can reference the source using the x-image-meta-location header. The image content will be served out from the external store, i.e. is never uploaded to the configured glance image store. 4. If the image data is not available yet, but you'd like reserve a spot for it, you can omit the data and a record will be created in the 'queued' state. This exists primarily to maintain backwards compatibility with OpenStack/Rackspace API semantics. 
The request body *must* be encoded as application/octet-stream, otherwise an HTTPBadRequest is returned. Upon a successful save of the image data and metadata, a response containing metadata about the image is returned, including its opaque identifier. :param req: The WSGI/Webob Request object :param image_meta: Mapping of metadata about image :param image_data: Actual image data that is to be stored :raises HTTPBadRequest if x-image-meta-location is missing and the request body is not application/octet-stream image data. """ self._enforce(req, 'add_image') is_public = image_meta.get('is_public') if is_public: self._enforce(req, 'publicize_image') image_meta = self._reserve(req, image_meta) id = image_meta['id'] image_meta = self._handle_source(req, id, image_meta, image_data) location_uri = image_meta.get('location') if location_uri: self.update_store_acls(req, id, location_uri, public=is_public) # Prevent client from learning the location, as it # could contain security credentials image_meta.pop('location', None) return {'image_meta': image_meta} @utils.mutating def update(self, req, id, image_meta, image_data): """ Updates an existing image with the registry. :param request: The WSGI/Webob Request object :param id: The opaque image identifier :retval Returns the updated image information as a mapping """ self._enforce(req, 'modify_image') is_public = image_meta.get('is_public') if is_public: self._enforce(req, 'publicize_image') orig_image_meta = self.get_image_meta_or_404(req, id) orig_status = orig_image_meta['status'] # The default behaviour for a PUT /images/<IMAGE_ID> is to # override any properties that were previously set. This, however, # leads to a number of issues for the common use case where a caller # registers an image with some properties and then almost immediately # uploads an image file along with some more properties. Here, we # check for a special header value to be false in order to force # properties NOT to be purged. However we also disable purging of # properties if an image file is being uploaded... purge_props = req.headers.get('x-glance-registry-purge-props', True) purge_props = (utils.bool_from_string(purge_props) and image_data is None) if image_data is not None and orig_status != 'queued': raise HTTPConflict(_("Cannot upload to an unqueued image")) # Only allow the Location|Copy-From fields to be modified if the # image is in queued status, which indicates that the user called # POST /images but originally supply neither a Location|Copy-From # field NOR image data location = self._external_source(image_meta, req) reactivating = orig_status != 'queued' and location activating = orig_status == 'queued' and (location or image_data) # Make image public in the backend store (if implemented) orig_or_updated_loc = location or orig_image_meta.get('location', None) if orig_or_updated_loc: self.update_store_acls(req, id, orig_or_updated_loc, public=is_public) if reactivating: msg = _("Attempted to update Location field for an image " "not in queued status.") raise HTTPBadRequest(explanation=msg, request=req, content_type="text/plain") try: if location: image_meta['size'] = self._get_size(req.context, image_meta, location) image_meta = registry.update_image_metadata(req.context, id, image_meta, purge_props) if activating: image_meta = self._handle_source(req, id, image_meta, image_data) except exception.Invalid, e: msg = (_("Failed to update image metadata. 
Got error: %(e)s") % locals()) for line in msg.split('\n'): LOG.error(line) self.notifier.error('image.update', msg) raise HTTPBadRequest(explanation=msg, request=req, content_type="text/plain") except exception.NotFound, e: msg = ("Failed to find image to update: %(e)s" % locals()) for line in msg.split('\n'): LOG.info(line) self.notifier.info('image.update', msg) raise HTTPNotFound(explanation=msg, request=req, content_type="text/plain") except exception.Forbidden, e: msg = ("Forbidden to update image: %(e)s" % locals()) for line in msg.split('\n'): LOG.info(line) self.notifier.info('image.update', msg) raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") else: self.notifier.info('image.update', image_meta) # Prevent client from learning the location, as it # could contain security credentials image_meta.pop('location', None) return {'image_meta': image_meta} @utils.mutating def delete(self, req, id): """ Deletes the image and all its chunks from the Glance :param req: The WSGI/Webob Request object :param id: The opaque image identifier :raises HttpBadRequest if image registry is invalid :raises HttpNotFound if image or any chunk is not available :raises HttpUnauthorized if image or any chunk is not deleteable by the requesting user """ self._enforce(req, 'delete_image') image = self.get_image_meta_or_404(req, id) if image['protected']: msg = _("Image is protected") LOG.debug(msg) raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") status = 'deleted' try: # The image's location field may be None in the case # of a saving or queued image, therefore don't ask a backend # to delete the image if the backend doesn't yet store it. # See https://bugs.launchpad.net/glance/+bug/747799 if image['location']: if CONF.delayed_delete: status = 'pending_delete' schedule_delayed_delete_from_backend(image['location'], id) else: safe_delete_from_backend(image['location'], req.context, id) registry.update_image_metadata(req.context, id, {'status': status}) registry.delete_image_metadata(req.context, id) except exception.NotFound, e: msg = ("Failed to find image to delete: %(e)s" % locals()) for line in msg.split('\n'): LOG.info(line) self.notifier.info('image.delete', msg) raise HTTPNotFound(explanation=msg, request=req, content_type="text/plain") except exception.Forbidden, e: msg = ("Forbidden to delete image: %(e)s" % locals()) for line in msg.split('\n'): LOG.info(line) self.notifier.info('image.delete', msg) raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") else: self.notifier.info('image.delete', image) def get_store_or_400(self, request, scheme): """ Grabs the storage backend for the supplied store name or raises an HTTPBadRequest (400) response :param request: The WSGI/Webob Request object :param scheme: The backend store scheme :raises HTTPNotFound if store does not exist """ try: return get_store_from_scheme(request.context, scheme) except exception.UnknownScheme: msg = _("Store for scheme %s not found") LOG.error(msg % scheme) raise HTTPBadRequest(explanation=msg, request=request, content_type='text/plain') def verify_scheme_or_exit(self, scheme): """ Verifies availability of the storage backend for the given scheme or exits :param scheme: The backend store scheme """ try: get_store_from_scheme(context.RequestContext(), scheme) except exception.UnknownScheme: msg = _("Store for scheme %s not found") LOG.error(msg % scheme) # message on stderr will only be visible if started directly via # bin/glance-api, as opposed to 
being daemonized by glance-control sys.stderr.write(msg % scheme) sys.exit(255) class ImageDeserializer(wsgi.JSONRequestDeserializer): """Handles deserialization of specific controller method requests.""" def _deserialize(self, request): result = {} try: result['image_meta'] = utils.get_image_meta_from_headers(request) except exception.Invalid: image_size_str = request.headers['x-image-meta-size'] msg = _("Incoming image size of %s was not convertible to " "an integer.") % image_size_str raise HTTPBadRequest(explanation=msg, request=request) image_meta = result['image_meta'] image_meta = validate_image_meta(request, image_meta) if request.content_length: image_size = request.content_length elif 'size' in image_meta: image_size = image_meta['size'] else: image_size = None data = request.body_file if self.has_body(request) else None if image_size is None and data is not None: data = utils.LimitingReader(data, CONF.image_size_cap) #NOTE(bcwaldon): this is a hack to make sure the downstream code # gets the correct image data request.body_file = data elif image_size > CONF.image_size_cap: max_image_size = CONF.image_size_cap msg = _("Denying attempt to upload image larger than %d bytes.") LOG.warn(msg % max_image_size) raise HTTPBadRequest(explanation=msg % max_image_size, request=request) result['image_data'] = data return result def create(self, request): return self._deserialize(request) def update(self, request): return self._deserialize(request) class ImageSerializer(wsgi.JSONResponseSerializer): """Handles serialization of specific controller method responses.""" def __init__(self): self.notifier = notifier.Notifier() def _inject_location_header(self, response, image_meta): location = self._get_image_location(image_meta) response.headers['Location'] = location.encode('utf-8') def _inject_checksum_header(self, response, image_meta): if image_meta['checksum'] is not None: response.headers['ETag'] = image_meta['checksum'].encode('utf-8') def _inject_image_meta_headers(self, response, image_meta): """ Given a response and mapping of image metadata, injects the Response with a set of HTTP headers for the image metadata. Each main image metadata field is injected as a HTTP header with key 'x-image-meta-<FIELD>' except for the properties field, which is further broken out into a set of 'x-image-meta-property-<KEY>' headers :param response: The Webob Response object :param image_meta: Mapping of image metadata """ headers = utils.image_meta_to_http_headers(image_meta) for k, v in headers.items(): response.headers[k.encode('utf-8')] = v.encode('utf-8') def _get_image_location(self, image_meta): """Build a relative url to reach the image defined by image_meta.""" return "/v1/images/%s" % image_meta['id'] def meta(self, response, result): image_meta = result['image_meta'] self._inject_image_meta_headers(response, image_meta) self._inject_location_header(response, image_meta) self._inject_checksum_header(response, image_meta) return response def show(self, response, result): image_meta = result['image_meta'] image_id = image_meta['id'] image_iter = result['image_iterator'] # image_meta['size'] should be an int, but could possibly be a str expected_size = int(image_meta['size']) response.app_iter = common.size_checked_iter( response, image_meta, expected_size, image_iter, self.notifier) # Using app_iter blanks content-length, so we set it here... 
response.headers['Content-Length'] = str(image_meta['size']) response.headers['Content-Type'] = 'application/octet-stream' self._inject_image_meta_headers(response, image_meta) self._inject_location_header(response, image_meta) self._inject_checksum_header(response, image_meta) return response def update(self, response, result): image_meta = result['image_meta'] response.body = self.to_json(dict(image=image_meta)) response.headers['Content-Type'] = 'application/json' self._inject_location_header(response, image_meta) self._inject_checksum_header(response, image_meta) return response def create(self, response, result): image_meta = result['image_meta'] response.status = 201 response.headers['Content-Type'] = 'application/json' response.body = self.to_json(dict(image=image_meta)) self._inject_location_header(response, image_meta) self._inject_checksum_header(response, image_meta) return response def create_resource(): """Images resource factory method""" deserializer = ImageDeserializer() serializer = ImageSerializer() return wsgi.Resource(Controller(), deserializer, serializer)
./CrossVul/dataset_final_sorted/CWE-264/py/bad_3772_0
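The delete() methods are where this good/bad pair actually differ. In good_3772_0 the registry calls (update_image_metadata, then delete_image_metadata) run before any backend deletion, because the registry is what enforces authorization (the in-code comment cites https://bugs.launchpad.net/glance/+bug/1065187); in bad_3772_0 the backend store is purged first, so the permission check can only fail after the data is gone. A toy reproduction of why that ordering is a CWE-264 problem follows; every class here is an illustrative fake, not Glance code:

class Forbidden(Exception):
    """Stand-in for glance.common.exception.Forbidden."""

class FakeRegistry(object):
    # In Glance, the registry layer is where the policy check lives.
    def update_image_metadata(self, context, image_id, values):
        raise Forbidden("policy denies image modification")

class FakeBackend(object):
    def __init__(self):
        self.deleted = []
    def safe_delete(self, location, context, image_id):
        self.deleted.append(image_id)

def vulnerable_delete(registry, backend, context, image):
    # Backend deletion first, mirroring the ordering in bad_3772_0...
    backend.safe_delete(image['location'], context, image['id'])
    # ...so by the time this raises Forbidden, the data is already gone.
    registry.update_image_metadata(context, image['id'], {'status': 'deleted'})

backend = FakeBackend()
try:
    vulnerable_delete(FakeRegistry(), backend, None,
                      {'id': 'abc123', 'location': 'swift://bucket/abc123'})
except Forbidden:
    pass
assert backend.deleted == ['abc123']  # caller gets a 403, but data is destroyed anyway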
crossvul-python_data_good_3633_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Cloud Controller: Implementation of EC2 REST API calls, which are dispatched to other nodes via AMQP RPC. State is via distributed datastore. """ import base64 import os import re import shutil import tempfile import time import urllib from nova import block_device from nova import compute from nova import context from nova import crypto from nova import db from nova import exception from nova import flags from nova import ipv6 from nova import log as logging from nova import network from nova import rpc from nova import quota from nova import utils from nova import volume from nova.api.ec2 import ec2utils from nova.compute import instance_types from nova.compute import vm_states from nova.image import s3 FLAGS = flags.FLAGS flags.DECLARE('dhcp_domain', 'nova.network.manager') flags.DECLARE('service_down_time', 'nova.scheduler.driver') LOG = logging.getLogger("nova.api.cloud") def _gen_key(context, user_id, key_name): """Generate a key This is a module level method because it is slow and we need to defer it into a process pool.""" # NOTE(vish): generating key pair is slow so check for legal # creation before creating key_pair try: db.key_pair_get(context, user_id, key_name) raise exception.KeyPairExists(key_name=key_name) except exception.NotFound: pass private_key, public_key, fingerprint = crypto.generate_key_pair() key = {} key['user_id'] = user_id key['name'] = key_name key['public_key'] = public_key key['fingerprint'] = fingerprint db.key_pair_create(context, key) return {'private_key': private_key, 'fingerprint': fingerprint} # EC2 API can return the following values as documented in the EC2 API # http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ # ApiReference-ItemType-InstanceStateType.html # pending | running | shutting-down | terminated | stopping | stopped _STATE_DESCRIPTION_MAP = { None: 'pending', vm_states.ACTIVE: 'running', vm_states.BUILDING: 'pending', vm_states.REBUILDING: 'pending', vm_states.DELETED: 'terminated', vm_states.STOPPED: 'stopped', vm_states.MIGRATING: 'migrate', vm_states.RESIZING: 'resize', vm_states.PAUSED: 'pause', vm_states.SUSPENDED: 'suspend', vm_states.RESCUED: 'rescue', } def state_description_from_vm_state(vm_state): """Map the vm state to the server status string""" return _STATE_DESCRIPTION_MAP.get(vm_state, vm_state) # TODO(yamahata): hypervisor dependent default device name _DEFAULT_ROOT_DEVICE_NAME = '/dev/sda1' _DEFAULT_MAPPINGS = {'ami': 'sda1', 'ephemeral0': 'sda2', 'root': _DEFAULT_ROOT_DEVICE_NAME, 'swap': 'sda3'} def _parse_block_device_mapping(bdm): """Parse BlockDeviceMappingItemType into flat hash BlockDevicedMapping.<N>.DeviceName BlockDevicedMapping.<N>.Ebs.SnapshotId BlockDevicedMapping.<N>.Ebs.VolumeSize BlockDevicedMapping.<N>.Ebs.DeleteOnTermination 
BlockDevicedMapping.<N>.Ebs.NoDevice BlockDevicedMapping.<N>.VirtualName => remove .Ebs and allow volume id in SnapshotId """ ebs = bdm.pop('ebs', None) if ebs: ec2_id = ebs.pop('snapshot_id', None) if ec2_id: id = ec2utils.ec2_id_to_id(ec2_id) if ec2_id.startswith('snap-'): bdm['snapshot_id'] = id elif ec2_id.startswith('vol-'): bdm['volume_id'] = id ebs.setdefault('delete_on_termination', True) bdm.update(ebs) return bdm def _properties_get_mappings(properties): return block_device.mappings_prepend_dev(properties.get('mappings', [])) def _format_block_device_mapping(bdm): """Contruct BlockDeviceMappingItemType {'device_name': '...', 'snapshot_id': , ...} => BlockDeviceMappingItemType """ keys = (('deviceName', 'device_name'), ('virtualName', 'virtual_name')) item = {} for name, k in keys: if k in bdm: item[name] = bdm[k] if bdm.get('no_device'): item['noDevice'] = True if ('snapshot_id' in bdm) or ('volume_id' in bdm): ebs_keys = (('snapshotId', 'snapshot_id'), ('snapshotId', 'volume_id'), # snapshotId is abused ('volumeSize', 'volume_size'), ('deleteOnTermination', 'delete_on_termination')) ebs = {} for name, k in ebs_keys: if k in bdm: if k == 'snapshot_id': ebs[name] = ec2utils.id_to_ec2_snap_id(bdm[k]) elif k == 'volume_id': ebs[name] = ec2utils.id_to_ec2_vol_id(bdm[k]) else: ebs[name] = bdm[k] assert 'snapshotId' in ebs item['ebs'] = ebs return item def _format_mappings(properties, result): """Format multiple BlockDeviceMappingItemType""" mappings = [{'virtualName': m['virtual'], 'deviceName': m['device']} for m in _properties_get_mappings(properties) if block_device.is_swap_or_ephemeral(m['virtual'])] block_device_mapping = [_format_block_device_mapping(bdm) for bdm in properties.get('block_device_mapping', [])] # NOTE(yamahata): overwrite mappings with block_device_mapping for bdm in block_device_mapping: for i in range(len(mappings)): if bdm['deviceName'] == mappings[i]['deviceName']: del mappings[i] break mappings.append(bdm) # NOTE(yamahata): trim ebs.no_device == true. Is this necessary? mappings = [bdm for bdm in mappings if not (bdm.get('noDevice', False))] if mappings: result['blockDeviceMapping'] = mappings class CloudController(object): """ CloudController provides the critical dispatch between inbound API calls through the endpoint and messages sent to the other nodes. """ def __init__(self): self.image_service = s3.S3ImageService() self.network_api = network.API() self.volume_api = volume.API() self.compute_api = compute.API( network_api=self.network_api, volume_api=self.volume_api) self.setup() def __str__(self): return 'CloudController' def setup(self): """ Ensure the keychains and folders exist. 
""" # FIXME(ja): this should be moved to a nova-manage command, # if not setup throw exceptions instead of running # Create keys folder, if it doesn't exist if not os.path.exists(FLAGS.keys_path): os.makedirs(FLAGS.keys_path) # Gen root CA, if we don't have one root_ca_path = os.path.join(FLAGS.ca_path, FLAGS.ca_file) if not os.path.exists(root_ca_path): genrootca_sh_path = os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir, 'CA', 'genrootca.sh') start = os.getcwd() if not os.path.exists(FLAGS.ca_path): os.makedirs(FLAGS.ca_path) os.chdir(FLAGS.ca_path) # TODO(vish): Do this with M2Crypto instead utils.runthis(_("Generating root CA: %s"), "sh", genrootca_sh_path) os.chdir(start) def _get_mpi_data(self, context, project_id): result = {} search_opts = {'project_id': project_id} for instance in self.compute_api.get_all(context, search_opts=search_opts): if instance['fixed_ips']: line = '%s slots=%d' % (instance['fixed_ips'][0]['address'], instance['vcpus']) key = str(instance['key_name']) if key in result: result[key].append(line) else: result[key] = [line] return result def _get_availability_zone_by_host(self, context, host): services = db.service_get_all_by_host(context.elevated(), host) if len(services) > 0: return services[0]['availability_zone'] return 'unknown zone' def _get_image_state(self, image): # NOTE(vish): fallback status if image_state isn't set state = image.get('status') if state == 'active': state = 'available' return image['properties'].get('image_state', state) def _format_instance_mapping(self, ctxt, instance_ref): root_device_name = instance_ref['root_device_name'] if root_device_name is None: return _DEFAULT_MAPPINGS mappings = {} mappings['ami'] = block_device.strip_dev(root_device_name) mappings['root'] = root_device_name default_local_device = instance_ref.get('default_local_device') if default_local_device: mappings['ephemeral0'] = default_local_device default_swap_device = instance_ref.get('default_swap_device') if default_swap_device: mappings['swap'] = default_swap_device ebs_devices = [] # 'ephemeralN', 'swap' and ebs for bdm in db.block_device_mapping_get_all_by_instance( ctxt, instance_ref['id']): if bdm['no_device']: continue # ebs volume case if (bdm['volume_id'] or bdm['snapshot_id']): ebs_devices.append(bdm['device_name']) continue virtual_name = bdm['virtual_name'] if not virtual_name: continue if block_device.is_swap_or_ephemeral(virtual_name): mappings[virtual_name] = bdm['device_name'] # NOTE(yamahata): I'm not sure how ebs device should be numbered. # Right now sort by device name for deterministic # result. if ebs_devices: nebs = 0 ebs_devices.sort() for ebs in ebs_devices: mappings['ebs%d' % nebs] = ebs nebs += 1 return mappings def get_metadata(self, address): ctxt = context.get_admin_context() search_opts = {'fixed_ip': address} try: instance_ref = self.compute_api.get_all(ctxt, search_opts=search_opts) except exception.NotFound: instance_ref = None if not instance_ref: return None # This ensures that all attributes of the instance # are populated. 
instance_ref = db.instance_get(ctxt, instance_ref[0]['id']) mpi = self._get_mpi_data(ctxt, instance_ref['project_id']) hostname = "%s.%s" % (instance_ref['hostname'], FLAGS.dhcp_domain) host = instance_ref['host'] availability_zone = self._get_availability_zone_by_host(ctxt, host) floating_ip = db.instance_get_floating_address(ctxt, instance_ref['id']) ec2_id = ec2utils.id_to_ec2_id(instance_ref['id']) image_ec2_id = self.image_ec2_id(instance_ref['image_ref']) security_groups = db.security_group_get_by_instance(ctxt, instance_ref['id']) security_groups = [x['name'] for x in security_groups] mappings = self._format_instance_mapping(ctxt, instance_ref) data = { 'user-data': self._format_user_data(instance_ref), 'meta-data': { 'ami-id': image_ec2_id, 'ami-launch-index': instance_ref['launch_index'], 'ami-manifest-path': 'FIXME', 'block-device-mapping': mappings, 'hostname': hostname, 'instance-action': 'none', 'instance-id': ec2_id, 'instance-type': instance_ref['instance_type']['name'], 'local-hostname': hostname, 'local-ipv4': address, 'placement': {'availability-zone': availability_zone}, 'public-hostname': hostname, 'public-ipv4': floating_ip or '', 'reservation-id': instance_ref['reservation_id'], 'security-groups': security_groups, 'mpi': mpi}} # public-keys should be in meta-data only if user specified one if instance_ref['key_name']: data['meta-data']['public-keys'] = { '0': {'_name': instance_ref['key_name'], 'openssh-key': instance_ref['key_data']}} for image_type in ['kernel', 'ramdisk']: if instance_ref.get('%s_id' % image_type): ec2_id = self.image_ec2_id(instance_ref['%s_id' % image_type], self._image_type(image_type)) data['meta-data']['%s-id' % image_type] = ec2_id if False: # TODO(vish): store ancestor ids data['ancestor-ami-ids'] = [] if False: # TODO(vish): store product codes data['product-codes'] = [] return data def describe_availability_zones(self, context, **kwargs): if ('zone_name' in kwargs and 'verbose' in kwargs['zone_name'] and context.is_admin): return self._describe_availability_zones_verbose(context, **kwargs) else: return self._describe_availability_zones(context, **kwargs) def _describe_availability_zones(self, context, **kwargs): ctxt = context.elevated() enabled_services = db.service_get_all(ctxt, False) disabled_services = db.service_get_all(ctxt, True) available_zones = [] for zone in [service.availability_zone for service in enabled_services]: if not zone in available_zones: available_zones.append(zone) not_available_zones = [] for zone in [service.availability_zone for service in disabled_services if not service['availability_zone'] in available_zones]: if not zone in not_available_zones: not_available_zones.append(zone) result = [] for zone in available_zones: result.append({'zoneName': zone, 'zoneState': "available"}) for zone in not_available_zones: result.append({'zoneName': zone, 'zoneState': "not available"}) return {'availabilityZoneInfo': result} def _describe_availability_zones_verbose(self, context, **kwargs): rv = {'availabilityZoneInfo': [{'zoneName': 'nova', 'zoneState': 'available'}]} services = db.service_get_all(context, False) now = utils.utcnow() hosts = [] for host in [service['host'] for service in services]: if not host in hosts: hosts.append(host) for host in hosts: rv['availabilityZoneInfo'].append({'zoneName': '|- %s' % host, 'zoneState': ''}) hsvcs = [service for service in services \ if service['host'] == host] for svc in hsvcs: delta = now - (svc['updated_at'] or svc['created_at']) alive = (delta.seconds <= 
FLAGS.service_down_time) art = (alive and ":-)") or "XXX" active = 'enabled' if svc['disabled']: active = 'disabled' rv['availabilityZoneInfo'].append({ 'zoneName': '| |- %s' % svc['binary'], 'zoneState': '%s %s %s' % (active, art, svc['updated_at'])}) return rv def describe_regions(self, context, region_name=None, **kwargs): if FLAGS.region_list: regions = [] for region in FLAGS.region_list: name, _sep, host = region.partition('=') endpoint = '%s://%s:%s%s' % (FLAGS.ec2_scheme, host, FLAGS.ec2_port, FLAGS.ec2_path) regions.append({'regionName': name, 'regionEndpoint': endpoint}) else: regions = [{'regionName': 'nova', 'regionEndpoint': '%s://%s:%s%s' % (FLAGS.ec2_scheme, FLAGS.ec2_host, FLAGS.ec2_port, FLAGS.ec2_path)}] return {'regionInfo': regions} def describe_snapshots(self, context, snapshot_id=None, owner=None, restorable_by=None, **kwargs): if snapshot_id: snapshots = [] for ec2_id in snapshot_id: internal_id = ec2utils.ec2_id_to_id(ec2_id) snapshot = self.volume_api.get_snapshot( context, snapshot_id=internal_id) snapshots.append(snapshot) else: snapshots = self.volume_api.get_all_snapshots(context) snapshots = [self._format_snapshot(context, s) for s in snapshots] return {'snapshotSet': snapshots} def _format_snapshot(self, context, snapshot): s = {} s['snapshotId'] = ec2utils.id_to_ec2_snap_id(snapshot['id']) s['volumeId'] = ec2utils.id_to_ec2_vol_id(snapshot['volume_id']) s['status'] = snapshot['status'] s['startTime'] = snapshot['created_at'] s['progress'] = snapshot['progress'] s['ownerId'] = snapshot['project_id'] s['volumeSize'] = snapshot['volume_size'] s['description'] = snapshot['display_description'] s['display_name'] = snapshot['display_name'] s['display_description'] = snapshot['display_description'] return s def create_snapshot(self, context, volume_id, **kwargs): LOG.audit(_("Create snapshot of volume %s"), volume_id, context=context) volume_id = ec2utils.ec2_id_to_id(volume_id) snapshot = self.volume_api.create_snapshot( context, volume_id=volume_id, name=kwargs.get('display_name'), description=kwargs.get('display_description')) return self._format_snapshot(context, snapshot) def delete_snapshot(self, context, snapshot_id, **kwargs): snapshot_id = ec2utils.ec2_id_to_id(snapshot_id) self.volume_api.delete_snapshot(context, snapshot_id=snapshot_id) return True def describe_key_pairs(self, context, key_name=None, **kwargs): key_pairs = db.key_pair_get_all_by_user(context, context.user_id) if not key_name is None: key_pairs = [x for x in key_pairs if x['name'] in key_name] result = [] for key_pair in key_pairs: # filter out the vpn keys suffix = FLAGS.vpn_key_suffix if context.is_admin or \ not key_pair['name'].endswith(suffix): result.append({ 'keyName': key_pair['name'], 'keyFingerprint': key_pair['fingerprint'], }) return {'keySet': result} def create_key_pair(self, context, key_name, **kwargs): LOG.audit(_("Create key pair %s"), key_name, context=context) data = _gen_key(context, context.user_id, key_name) return {'keyName': key_name, 'keyFingerprint': data['fingerprint'], 'keyMaterial': data['private_key']} # TODO(vish): when context is no longer an object, pass it here def import_public_key(self, context, key_name, public_key, fingerprint=None): LOG.audit(_("Import key %s"), key_name, context=context) key = {} key['user_id'] = context.user_id key['name'] = key_name key['public_key'] = public_key if fingerprint is None: tmpdir = tempfile.mkdtemp() pubfile = os.path.join(tmpdir, 'temp.pub') fh = open(pubfile, 'w') fh.write(public_key) fh.close() (out, err) = 
utils.execute('ssh-keygen', '-q', '-l', '-f', '%s' % (pubfile)) fingerprint = out.split(' ')[1] shutil.rmtree(tmpdir) key['fingerprint'] = fingerprint db.key_pair_create(context, key) return True def delete_key_pair(self, context, key_name, **kwargs): LOG.audit(_("Delete key pair %s"), key_name, context=context) try: db.key_pair_destroy(context, context.user_id, key_name) except exception.NotFound: # aws returns true even if the key doesn't exist pass return True def describe_security_groups(self, context, group_name=None, group_id=None, **kwargs): self.compute_api.ensure_default_security_group(context) if group_name or group_id: groups = [] if group_name: for name in group_name: group = db.security_group_get_by_name(context, context.project_id, name) groups.append(group) if group_id: for gid in group_id: group = db.security_group_get(context, gid) groups.append(group) elif context.is_admin: groups = db.security_group_get_all(context) else: groups = db.security_group_get_by_project(context, context.project_id) groups = [self._format_security_group(context, g) for g in groups] return {'securityGroupInfo': list(sorted(groups, key=lambda k: (k['ownerId'], k['groupName'])))} def _format_security_group(self, context, group): g = {} g['groupDescription'] = group.description g['groupName'] = group.name g['ownerId'] = group.project_id g['ipPermissions'] = [] for rule in group.rules: r = {} r['groups'] = [] r['ipRanges'] = [] if rule.group_id: source_group = db.security_group_get(context, rule.group_id) r['groups'] += [{'groupName': source_group.name, 'userId': source_group.project_id}] if rule.protocol: r['ipProtocol'] = rule.protocol r['fromPort'] = rule.from_port r['toPort'] = rule.to_port g['ipPermissions'] += [dict(r)] else: for protocol, min_port, max_port in (('icmp', -1, -1), ('tcp', 1, 65535), ('udp', 1, 65536)): r['ipProtocol'] = protocol r['fromPort'] = min_port r['toPort'] = max_port g['ipPermissions'] += [dict(r)] else: r['ipProtocol'] = rule.protocol r['fromPort'] = rule.from_port r['toPort'] = rule.to_port r['ipRanges'] += [{'cidrIp': rule.cidr}] g['ipPermissions'] += [r] return g def _rule_args_to_dict(self, context, kwargs): rules = [] if not 'groups' in kwargs and not 'ip_ranges' in kwargs: rule = self._rule_dict_last_step(context, **kwargs) if rule: rules.append(rule) return rules if 'ip_ranges' in kwargs: rules = self._cidr_args_split(kwargs) else: rules = [kwargs] finalset = [] for rule in rules: if 'groups' in rule: groups_values = self._groups_args_split(rule) for groups_value in groups_values: final = self._rule_dict_last_step(context, **groups_value) finalset.append(final) else: final = self._rule_dict_last_step(context, **rule) finalset.append(final) return finalset def _cidr_args_split(self, kwargs): cidr_args_split = [] cidrs = kwargs['ip_ranges'] for key, cidr in cidrs.iteritems(): mykwargs = kwargs.copy() del mykwargs['ip_ranges'] mykwargs['cidr_ip'] = cidr['cidr_ip'] cidr_args_split.append(mykwargs) return cidr_args_split def _groups_args_split(self, kwargs): groups_args_split = [] groups = kwargs['groups'] for key, group in groups.iteritems(): mykwargs = kwargs.copy() del mykwargs['groups'] if 'group_name' in group: mykwargs['source_security_group_name'] = group['group_name'] if 'user_id' in group: mykwargs['source_security_group_owner_id'] = group['user_id'] if 'group_id' in group: mykwargs['source_security_group_id'] = group['group_id'] groups_args_split.append(mykwargs) return groups_args_split def _rule_dict_last_step(self, context, to_port=None, from_port=None, 
ip_protocol=None, cidr_ip=None, user_id=None, source_security_group_name=None, source_security_group_owner_id=None): values = {} if source_security_group_name: source_project_id = self._get_source_project_id(context, source_security_group_owner_id) source_security_group = \ db.security_group_get_by_name(context.elevated(), source_project_id, source_security_group_name) notfound = exception.SecurityGroupNotFound if not source_security_group: raise notfound(security_group_id=source_security_group_name) values['group_id'] = source_security_group['id'] elif cidr_ip: # If this fails, it throws an exception. This is what we want. cidr_ip = urllib.unquote(cidr_ip).decode() if not utils.is_valid_cidr(cidr_ip): # Raise exception for non-valid address raise exception.InvalidCidr(cidr=cidr_ip) values['cidr'] = cidr_ip else: values['cidr'] = '0.0.0.0/0' if ip_protocol and from_port and to_port: ip_protocol = str(ip_protocol) try: # Verify integer conversions from_port = int(from_port) to_port = int(to_port) except ValueError: if ip_protocol.upper() == 'ICMP': raise exception.InvalidInput(reason="Type and" " Code must be integers for ICMP protocol type") else: raise exception.InvalidInput(reason="To and From ports " "must be integers") if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']: raise exception.InvalidIpProtocol(protocol=ip_protocol) # Verify that from_port is always less than # or equal to to_port if from_port > to_port: raise exception.InvalidPortRange(from_port=from_port, to_port=to_port, msg="Former value cannot" " be greater than the latter") # Verify valid TCP, UDP port ranges if (ip_protocol.upper() in ['TCP', 'UDP'] and (from_port < 1 or to_port > 65535)): raise exception.InvalidPortRange(from_port=from_port, to_port=to_port, msg="Valid TCP and UDP ports" " should be between 1-65535") # Verify ICMP type and code if (ip_protocol.upper() == "ICMP" and (from_port < -1 or to_port > 255)): raise exception.InvalidPortRange(from_port=from_port, to_port=to_port, msg="For ICMP, the" " type:code must be valid") values['protocol'] = ip_protocol values['from_port'] = from_port values['to_port'] = to_port else: # If cidr based filtering, protocol and ports are mandatory if 'cidr' in values: return None return values def _security_group_rule_exists(self, security_group, values): """Indicates whether the specified rule values are already defined in the given security group.
""" for rule in security_group.rules: if 'group_id' in values: if rule['group_id'] == values['group_id']: return rule['id'] else: is_duplicate = True for key in ('cidr', 'from_port', 'to_port', 'protocol'): if rule[key] != values[key]: is_duplicate = False break if is_duplicate: return rule['id'] return False def revoke_security_group_ingress(self, context, group_name=None, group_id=None, **kwargs): if not group_name and not group_id: err = "Not enough parameters, need group_name or group_id" raise exception.ApiError(_(err)) self.compute_api.ensure_default_security_group(context) notfound = exception.SecurityGroupNotFound if group_name: security_group = db.security_group_get_by_name(context, context.project_id, group_name) if not security_group: raise notfound(security_group_id=group_name) if group_id: security_group = db.security_group_get(context, group_id) if not security_group: raise notfound(security_group_id=group_id) msg = "Revoke security group ingress %s" LOG.audit(_(msg), security_group['name'], context=context) prevalues = [] try: prevalues = kwargs['ip_permissions'] except KeyError: prevalues.append(kwargs) rule_id = None for values in prevalues: rulesvalues = self._rule_args_to_dict(context, values) if not rulesvalues: err = "%s Not enough parameters to build a valid rule" raise exception.ApiError(_(err % rulesvalues)) for values_for_rule in rulesvalues: values_for_rule['parent_group_id'] = security_group.id rule_id = self._security_group_rule_exists(security_group, values_for_rule) if rule_id: db.security_group_rule_destroy(context, rule_id) if rule_id: # NOTE(vish): we removed a rule, so refresh self.compute_api.trigger_security_group_rules_refresh( context, security_group_id=security_group['id']) return True raise exception.ApiError(_("No rule for the specified parameters.")) # TODO(soren): This has only been tested with Boto as the client. # Unfortunately, it seems Boto is using an old API # for these operations, so support for newer API versions # is sketchy. 
def authorize_security_group_ingress(self, context, group_name=None, group_id=None, **kwargs): if not group_name and not group_id: err = "Not enough parameters, need group_name or group_id" raise exception.ApiError(_(err)) self.compute_api.ensure_default_security_group(context) notfound = exception.SecurityGroupNotFound if group_name: security_group = db.security_group_get_by_name(context, context.project_id, group_name) if not security_group: raise notfound(security_group_id=group_name) if group_id: security_group = db.security_group_get(context, group_id) if not security_group: raise notfound(security_group_id=group_id) msg = "Authorize security group ingress %s" LOG.audit(_(msg), security_group['name'], context=context) prevalues = [] try: prevalues = kwargs['ip_permissions'] except KeyError: prevalues.append(kwargs) postvalues = [] for values in prevalues: rulesvalues = self._rule_args_to_dict(context, values) if not rulesvalues: err = "%s Not enough parameters to build a valid rule" raise exception.ApiError(_(err % rulesvalues)) for values_for_rule in rulesvalues: values_for_rule['parent_group_id'] = security_group.id if self._security_group_rule_exists(security_group, values_for_rule): err = '%s - This rule already exists in group' raise exception.ApiError(_(err) % values_for_rule) postvalues.append(values_for_rule) allowed = quota.allowed_security_group_rules(context, security_group['id'], 1) if allowed < 1: msg = _("Quota exceeded, too many security group rules.") raise exception.ApiError(msg) for values_for_rule in postvalues: security_group_rule = db.security_group_rule_create( context, values_for_rule) if postvalues: self.compute_api.trigger_security_group_rules_refresh( context, security_group_id=security_group['id']) return True raise exception.ApiError(_("No rule for the specified parameters.")) def _get_source_project_id(self, context, source_security_group_owner_id): if source_security_group_owner_id: # Parse user:project for source group. source_parts = source_security_group_owner_id.split(':') # If no project name specified, assume it's same as user name. # Since we're looking up by project name, the user name is not # used here. It's only read for EC2 API compatibility. if len(source_parts) == 2: source_project_id = source_parts[1] else: source_project_id = source_parts[0] else: source_project_id = context.project_id return source_project_id def create_security_group(self, context, group_name, group_description): if not re.match('^[a-zA-Z0-9_\- ]+$', str(group_name)): # Some validation to ensure that values match API spec. # - Alphanumeric characters, spaces, dashes, and underscores. # TODO(Daviey): LP: #813685 extend beyond group_name checking, and # probably create a param validator that can be used elsewhere. err = _("Value (%s) for parameter GroupName is invalid." " Content limited to Alphanumeric characters, " "spaces, dashes, and underscores.") % group_name # err not that of master ec2 implementation, as they fail to raise. raise exception.InvalidParameterValue(err=err) if len(str(group_name)) > 255: err = _("Value (%s) for parameter GroupName is invalid." 
" Length exceeds maximum of 255.") % group_name raise exception.InvalidParameterValue(err=err) LOG.audit(_("Create Security Group %s"), group_name, context=context) self.compute_api.ensure_default_security_group(context) if db.security_group_exists(context, context.project_id, group_name): raise exception.ApiError(_('group %s already exists') % group_name) if quota.allowed_security_groups(context, 1) < 1: msg = _("Quota exceeded, too many security groups.") raise exception.ApiError(msg) group = {'user_id': context.user_id, 'project_id': context.project_id, 'name': group_name, 'description': group_description} group_ref = db.security_group_create(context, group) return {'securityGroupSet': [self._format_security_group(context, group_ref)]} def delete_security_group(self, context, group_name=None, group_id=None, **kwargs): if not group_name and not group_id: err = "Not enough parameters, need group_name or group_id" raise exception.ApiError(_(err)) notfound = exception.SecurityGroupNotFound if group_name: security_group = db.security_group_get_by_name(context, context.project_id, group_name) if not security_group: raise notfound(security_group_id=group_name) elif group_id: security_group = db.security_group_get(context, group_id) if not security_group: raise notfound(security_group_id=group_id) LOG.audit(_("Delete security group %s"), group_name, context=context) db.security_group_destroy(context, security_group.id) return True def get_console_output(self, context, instance_id, **kwargs): LOG.audit(_("Get console output for instance %s"), instance_id, context=context) # instance_id may be passed in as a list of instances if type(instance_id) == list: ec2_id = instance_id[0] else: ec2_id = instance_id instance_id = ec2utils.ec2_id_to_id(ec2_id) output = self.compute_api.get_console_output( context, instance_id=instance_id) now = utils.utcnow() return {"InstanceId": ec2_id, "Timestamp": now, "output": base64.b64encode(output)} def get_ajax_console(self, context, instance_id, **kwargs): ec2_id = instance_id[0] instance_id = ec2utils.ec2_id_to_id(ec2_id) return self.compute_api.get_ajax_console(context, instance_id=instance_id) def get_vnc_console(self, context, instance_id, **kwargs): """Returns vnc browser url. 
Used by OS dashboard.""" ec2_id = instance_id instance_id = ec2utils.ec2_id_to_id(ec2_id) return self.compute_api.get_vnc_console(context, instance_id=instance_id) def describe_volumes(self, context, volume_id=None, **kwargs): if volume_id: volumes = [] for ec2_id in volume_id: internal_id = ec2utils.ec2_id_to_id(ec2_id) volume = self.volume_api.get(context, volume_id=internal_id) volumes.append(volume) else: volumes = self.volume_api.get_all(context) volumes = [self._format_volume(context, v) for v in volumes] return {'volumeSet': volumes} def _format_volume(self, context, volume): instance_ec2_id = None instance_data = None if volume.get('instance', None): instance_id = volume['instance']['id'] instance_ec2_id = ec2utils.id_to_ec2_id(instance_id) instance_data = '%s[%s]' % (instance_ec2_id, volume['instance']['host']) v = {} v['volumeId'] = ec2utils.id_to_ec2_vol_id(volume['id']) v['status'] = volume['status'] v['size'] = volume['size'] v['availabilityZone'] = volume['availability_zone'] v['createTime'] = volume['created_at'] if context.is_admin: v['status'] = '%s (%s, %s, %s, %s)' % ( volume['status'], volume['project_id'], volume['host'], instance_data, volume['mountpoint']) if volume['attach_status'] == 'attached': v['attachmentSet'] = [{'attachTime': volume['attach_time'], 'deleteOnTermination': False, 'device': volume['mountpoint'], 'instanceId': instance_ec2_id, 'status': 'attached', 'volumeId': v['volumeId']}] else: v['attachmentSet'] = [{}] if volume.get('snapshot_id') != None: v['snapshotId'] = ec2utils.id_to_ec2_snap_id(volume['snapshot_id']) else: v['snapshotId'] = None v['display_name'] = volume['display_name'] v['display_description'] = volume['display_description'] return v def create_volume(self, context, **kwargs): size = kwargs.get('size') if kwargs.get('snapshot_id') != None: snapshot_id = ec2utils.ec2_id_to_id(kwargs['snapshot_id']) LOG.audit(_("Create volume from snapshot %s"), snapshot_id, context=context) else: snapshot_id = None LOG.audit(_("Create volume of %s GB"), size, context=context) volume = self.volume_api.create( context, size=size, snapshot_id=snapshot_id, name=kwargs.get('display_name'), description=kwargs.get('display_description')) # TODO(vish): Instance should be None at db layer instead of # trying to lazy load, but for now we turn it into # a dict to avoid an error. 
return self._format_volume(context, dict(volume)) def delete_volume(self, context, volume_id, **kwargs): volume_id = ec2utils.ec2_id_to_id(volume_id) self.volume_api.delete(context, volume_id=volume_id) return True def update_volume(self, context, volume_id, **kwargs): volume_id = ec2utils.ec2_id_to_id(volume_id) updatable_fields = ['display_name', 'display_description'] changes = {} for field in updatable_fields: if field in kwargs: changes[field] = kwargs[field] if changes: self.volume_api.update(context, volume_id=volume_id, fields=changes) return True def attach_volume(self, context, volume_id, instance_id, device, **kwargs): volume_id = ec2utils.ec2_id_to_id(volume_id) instance_id = ec2utils.ec2_id_to_id(instance_id) msg = _("Attach volume %(volume_id)s to instance %(instance_id)s" " at %(device)s") % locals() LOG.audit(msg, context=context) self.compute_api.attach_volume(context, instance_id=instance_id, volume_id=volume_id, device=device) volume = self.volume_api.get(context, volume_id=volume_id) return {'attachTime': volume['attach_time'], 'device': volume['mountpoint'], 'instanceId': ec2utils.id_to_ec2_id(instance_id), 'requestId': context.request_id, 'status': volume['attach_status'], 'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)} def detach_volume(self, context, volume_id, **kwargs): volume_id = ec2utils.ec2_id_to_id(volume_id) LOG.audit(_("Detach volume %s"), volume_id, context=context) volume = self.volume_api.get(context, volume_id=volume_id) instance = self.compute_api.detach_volume(context, volume_id=volume_id) return {'attachTime': volume['attach_time'], 'device': volume['mountpoint'], 'instanceId': ec2utils.id_to_ec2_id(instance['id']), 'requestId': context.request_id, 'status': volume['attach_status'], 'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)} def _format_kernel_id(self, instance_ref, result, key): kernel_id = instance_ref['kernel_id'] if kernel_id is None: return result[key] = self.image_ec2_id(instance_ref['kernel_id'], 'aki') def _format_ramdisk_id(self, instance_ref, result, key): ramdisk_id = instance_ref['ramdisk_id'] if ramdisk_id is None: return result[key] = self.image_ec2_id(instance_ref['ramdisk_id'], 'ari') @staticmethod def _format_user_data(instance_ref): return base64.b64decode(instance_ref['user_data']) def describe_instance_attribute(self, context, instance_id, attribute, **kwargs): def _unsupported_attribute(instance, result): raise exception.ApiError(_('attribute not supported: %s') % attribute) def _format_attr_block_device_mapping(instance, result): tmp = {} self._format_instance_root_device_name(instance, tmp) self._format_instance_bdm(context, instance_id, tmp['rootDeviceName'], result) def _format_attr_disable_api_termination(instance, result): _unsupported_attribute(instance, result) def _format_attr_group_set(instance, result): CloudController._format_group_set(instance, result) def _format_attr_instance_initiated_shutdown_behavior(instance, result): vm_state = instance['vm_state'] state_to_value = { vm_states.STOPPED: 'stopped', vm_states.DELETED: 'terminated', } value = state_to_value.get(vm_state) if value: result['instanceInitiatedShutdownBehavior'] = value def _format_attr_instance_type(instance, result): self._format_instance_type(instance, result) def _format_attr_kernel(instance, result): self._format_kernel_id(instance, result, 'kernel') def _format_attr_ramdisk(instance, result): self._format_ramdisk_id(instance, result, 'ramdisk') def _format_attr_root_device_name(instance, result): 
self._format_instance_root_device_name(instance, result) def _format_attr_source_dest_check(instance, result): _unsupported_attribute(instance, result) def _format_attr_user_data(instance, result): result['userData'] = self._format_user_data(instance) attribute_formatter = { 'blockDeviceMapping': _format_attr_block_device_mapping, 'disableApiTermination': _format_attr_disable_api_termination, 'groupSet': _format_attr_group_set, 'instanceInitiatedShutdownBehavior': _format_attr_instance_initiated_shutdown_behavior, 'instanceType': _format_attr_instance_type, 'kernel': _format_attr_kernel, 'ramdisk': _format_attr_ramdisk, 'rootDeviceName': _format_attr_root_device_name, 'sourceDestCheck': _format_attr_source_dest_check, 'userData': _format_attr_user_data, } fn = attribute_formatter.get(attribute) if fn is None: raise exception.ApiError( _('attribute not supported: %s') % attribute) ec2_instance_id = instance_id instance_id = ec2utils.ec2_id_to_id(ec2_instance_id) instance = self.compute_api.get(context, instance_id) result = {'instance_id': ec2_instance_id} fn(instance, result) return result def describe_instances(self, context, **kwargs): # Optional DescribeInstances argument instance_id = kwargs.get('instance_id', None) return self._format_describe_instances(context, instance_id=instance_id) def describe_instances_v6(self, context, **kwargs): # Optional DescribeInstancesV6 argument instance_id = kwargs.get('instance_id', None) return self._format_describe_instances(context, instance_id=instance_id, use_v6=True) def _format_describe_instances(self, context, **kwargs): return {'reservationSet': self._format_instances(context, **kwargs)} def _format_run_instances(self, context, reservation_id): i = self._format_instances(context, reservation_id=reservation_id) assert len(i) == 1 return i[0] def _format_instance_bdm(self, context, instance_id, root_device_name, result): """Format InstanceBlockDeviceMappingResponseItemType""" root_device_type = 'instance-store' mapping = [] for bdm in db.block_device_mapping_get_all_by_instance(context, instance_id): volume_id = bdm['volume_id'] if (volume_id is None or bdm['no_device']): continue if (bdm['device_name'] == root_device_name and (bdm['snapshot_id'] or bdm['volume_id'])): assert not bdm['virtual_name'] root_device_type = 'ebs' vol = self.volume_api.get(context, volume_id=volume_id) LOG.debug(_("vol = %s\n"), vol) # TODO(yamahata): volume attach time ebs = {'volumeId': volume_id, 'deleteOnTermination': bdm['delete_on_termination'], 'attachTime': vol['attach_time'] or '-', 'status': vol['status'], } res = {'deviceName': bdm['device_name'], 'ebs': ebs, } mapping.append(res) if mapping: result['blockDeviceMapping'] = mapping result['rootDeviceType'] = root_device_type @staticmethod def _format_instance_root_device_name(instance, result): result['rootDeviceName'] = (instance.get('root_device_name') or _DEFAULT_ROOT_DEVICE_NAME) @staticmethod def _format_instance_type(instance, result): if instance['instance_type']: result['instanceType'] = instance['instance_type'].get('name') else: result['instanceType'] = None @staticmethod def _format_group_set(instance, result): security_group_names = [] if instance.get('security_groups'): for security_group in instance['security_groups']: security_group_names.append(security_group['name']) result['groupSet'] = utils.convert_to_list_dict( security_group_names, 'groupId') def _format_instances(self, context, instance_id=None, use_v6=False, **search_opts): # TODO(termie): this method is poorly named as its name does 
not imply # that it will be making a variety of database calls # rather than simply formatting a bunch of instances that # were handed to it reservations = {} # NOTE(vish): instance_id is an optional list of ids to filter by if instance_id: instances = [] for ec2_id in instance_id: internal_id = ec2utils.ec2_id_to_id(ec2_id) try: instance = self.compute_api.get(context, internal_id) except exception.NotFound: continue instances.append(instance) else: try: # always filter out deleted instances search_opts['deleted'] = False instances = self.compute_api.get_all(context, search_opts=search_opts) except exception.NotFound: instances = [] for instance in instances: if not context.is_admin: if instance['image_ref'] == str(FLAGS.vpn_image_id): continue i = {} instance_id = instance['id'] ec2_id = ec2utils.id_to_ec2_id(instance_id) i['instanceId'] = ec2_id i['imageId'] = self.image_ec2_id(instance['image_ref']) self._format_kernel_id(instance, i, 'kernelId') self._format_ramdisk_id(instance, i, 'ramdiskId') i['instanceState'] = { 'code': instance['power_state'], 'name': state_description_from_vm_state(instance['vm_state'])} fixed_addr = None floating_addr = None if instance['fixed_ips']: fixed = instance['fixed_ips'][0] fixed_addr = fixed['address'] if fixed['floating_ips']: floating_addr = fixed['floating_ips'][0]['address'] if fixed['network'] and use_v6: i['dnsNameV6'] = ipv6.to_global( fixed['network']['cidr_v6'], fixed['virtual_interface']['address'], instance['project_id']) i['privateDnsName'] = fixed_addr i['privateIpAddress'] = fixed_addr i['publicDnsName'] = floating_addr i['ipAddress'] = floating_addr or fixed_addr i['dnsName'] = i['publicDnsName'] or i['privateDnsName'] i['keyName'] = instance['key_name'] if context.is_admin: i['keyName'] = '%s (%s, %s)' % (i['keyName'], instance['project_id'], instance['host']) i['productCodesSet'] = utils.convert_to_list_dict([], 'product_codes') self._format_instance_type(instance, i) i['launchTime'] = instance['created_at'] i['amiLaunchIndex'] = instance['launch_index'] i['displayName'] = instance['display_name'] i['displayDescription'] = instance['display_description'] self._format_instance_root_device_name(instance, i) self._format_instance_bdm(context, instance_id, i['rootDeviceName'], i) host = instance['host'] zone = self._get_availability_zone_by_host(context, host) i['placement'] = {'availabilityZone': zone} if instance['reservation_id'] not in reservations: r = {} r['reservationId'] = instance['reservation_id'] r['ownerId'] = instance['project_id'] self._format_group_set(instance, r) r['instancesSet'] = [] reservations[instance['reservation_id']] = r reservations[instance['reservation_id']]['instancesSet'].append(i) return list(reservations.values()) def describe_addresses(self, context, **kwargs): return self.format_addresses(context) def format_addresses(self, context): addresses = [] if context.is_admin: iterator = db.floating_ip_get_all(context) else: iterator = db.floating_ip_get_all_by_project(context, context.project_id) for floating_ip_ref in iterator: if floating_ip_ref['project_id'] is None: continue address = floating_ip_ref['address'] ec2_id = None if (floating_ip_ref['fixed_ip'] and floating_ip_ref['fixed_ip']['instance']): instance_id = floating_ip_ref['fixed_ip']['instance']['id'] ec2_id = ec2utils.id_to_ec2_id(instance_id) address_rv = {'public_ip': address, 'instance_id': ec2_id} if context.is_admin: details = "%s (%s)" % (address_rv['instance_id'], floating_ip_ref['project_id']) address_rv['instance_id'] = details 
addresses.append(address_rv) return {'addressesSet': addresses} def allocate_address(self, context, **kwargs): LOG.audit(_("Allocate address"), context=context) try: public_ip = self.network_api.allocate_floating_ip(context) return {'publicIp': public_ip} except rpc.RemoteError as ex: # NOTE(tr3buchet) - why does this block exist? if ex.exc_type == 'NoMoreFloatingIps': raise exception.NoMoreFloatingIps() else: raise def release_address(self, context, public_ip, **kwargs): LOG.audit(_("Release address %s"), public_ip, context=context) self.network_api.release_floating_ip(context, address=public_ip) return {'releaseResponse': ["Address released."]} def associate_address(self, context, instance_id, public_ip, **kwargs): LOG.audit(_("Associate address %(public_ip)s to" " instance %(instance_id)s") % locals(), context=context) instance_id = ec2utils.ec2_id_to_id(instance_id) self.compute_api.associate_floating_ip(context, instance_id=instance_id, address=public_ip) return {'associateResponse': ["Address associated."]} def disassociate_address(self, context, public_ip, **kwargs): LOG.audit(_("Disassociate address %s"), public_ip, context=context) self.network_api.disassociate_floating_ip(context, address=public_ip) return {'disassociateResponse': ["Address disassociated."]} def run_instances(self, context, **kwargs): max_count = int(kwargs.get('max_count', 1)) if kwargs.get('kernel_id'): kernel = self._get_image(context, kwargs['kernel_id']) kwargs['kernel_id'] = kernel['id'] if kwargs.get('ramdisk_id'): ramdisk = self._get_image(context, kwargs['ramdisk_id']) kwargs['ramdisk_id'] = ramdisk['id'] for bdm in kwargs.get('block_device_mapping', []): _parse_block_device_mapping(bdm) image = self._get_image(context, kwargs['image_id']) if image: image_state = self._get_image_state(image) else: raise exception.ImageNotFound(image_id=kwargs['image_id']) if image_state != 'available': raise exception.ApiError(_('Image must be available')) instances = self.compute_api.create(context, instance_type=instance_types.get_instance_type_by_name( kwargs.get('instance_type', None)), image_href=self._get_image(context, kwargs['image_id'])['id'], min_count=int(kwargs.get('min_count', max_count)), max_count=max_count, kernel_id=kwargs.get('kernel_id'), ramdisk_id=kwargs.get('ramdisk_id'), display_name=kwargs.get('display_name'), display_description=kwargs.get('display_description'), key_name=kwargs.get('key_name'), user_data=kwargs.get('user_data'), security_group=kwargs.get('security_group'), availability_zone=kwargs.get('placement', {}).get( 'availability_zone'), block_device_mapping=kwargs.get('block_device_mapping', {})) return self._format_run_instances(context, reservation_id=instances[0]['reservation_id']) def _do_instance(self, action, context, ec2_id): instance_id = ec2utils.ec2_id_to_id(ec2_id) action(context, instance_id=instance_id) def _do_instances(self, action, context, instance_id): for ec2_id in instance_id: self._do_instance(action, context, ec2_id) def terminate_instances(self, context, instance_id, **kwargs): """Terminate each instance in instance_id, which is a list of ec2 ids. 
instance_id is a kwarg so its name cannot be modified.""" LOG.debug(_("Going to start terminating instances")) self._do_instances(self.compute_api.delete, context, instance_id) return True def reboot_instances(self, context, instance_id, **kwargs): """instance_id is a list of instance ids""" LOG.audit(_("Reboot instance %r"), instance_id, context=context) self._do_instances(self.compute_api.reboot, context, instance_id) return True def stop_instances(self, context, instance_id, **kwargs): """Stop each instance in instance_id. Here instance_id is a list of instance ids""" LOG.debug(_("Going to stop instances")) self._do_instances(self.compute_api.stop, context, instance_id) return True def start_instances(self, context, instance_id, **kwargs): """Start each instance in instance_id. Here instance_id is a list of instance ids""" LOG.debug(_("Going to start instances")) self._do_instances(self.compute_api.start, context, instance_id) return True def rescue_instance(self, context, instance_id, **kwargs): """This is an extension to the normal ec2_api""" self._do_instance(self.compute_api.rescue, context, instance_id) return True def unrescue_instance(self, context, instance_id, **kwargs): """This is an extension to the normal ec2_api""" self._do_instance(self.compute_api.unrescue, context, instance_id) return True def update_instance(self, context, instance_id, **kwargs): updatable_fields = ['display_name', 'display_description'] changes = {} for field in updatable_fields: if field in kwargs: changes[field] = kwargs[field] if changes: instance_id = ec2utils.ec2_id_to_id(instance_id) self.compute_api.update(context, instance_id=instance_id, **changes) return True @staticmethod def _image_type(image_type): """Converts to a three letter image type. aki, kernel => aki ari, ramdisk => ari anything else => ami """ if image_type == 'kernel': return 'aki' if image_type == 'ramdisk': return 'ari' if image_type not in ['aki', 'ari']: return 'ami' return image_type @staticmethod def image_ec2_id(image_id, image_type='ami'): """Returns image ec2_id using id and three letter type.""" template = image_type + '-%08x' try: return ec2utils.id_to_ec2_id(int(image_id), template=template) except ValueError: #TODO(wwolf): once we have ec2_id -> glance_id mapping # in place, this won't be necessary return "ami-00000000" def _get_image(self, context, ec2_id): try: internal_id = ec2utils.ec2_id_to_id(ec2_id) image = self.image_service.show(context, internal_id) except (exception.InvalidEc2Id, exception.ImageNotFound): try: return self.image_service.show_by_name(context, ec2_id) except exception.NotFound: raise exception.ImageNotFound(image_id=ec2_id) image_type = ec2_id.split('-')[0] if self._image_type(image.get('container_format')) != image_type: raise exception.ImageNotFound(image_id=ec2_id) return image def _format_image(self, image): """Convert from format defined by BaseImageService to S3 format.""" i = {} image_type = self._image_type(image.get('container_format')) ec2_id = self.image_ec2_id(image.get('id'), image_type) name = image.get('name') i['imageId'] = ec2_id kernel_id = image['properties'].get('kernel_id') if kernel_id: i['kernelId'] = self.image_ec2_id(kernel_id, 'aki') ramdisk_id = image['properties'].get('ramdisk_id') if ramdisk_id: i['ramdiskId'] = self.image_ec2_id(ramdisk_id, 'ari') i['imageOwnerId'] = image['properties'].get('owner_id') if name: i['imageLocation'] = "%s (%s)" % (image['properties'].
get('image_location'), name) else: i['imageLocation'] = image['properties'].get('image_location') i['imageState'] = self._get_image_state(image) i['displayName'] = name i['description'] = image.get('description') display_mapping = {'aki': 'kernel', 'ari': 'ramdisk', 'ami': 'machine'} i['imageType'] = display_mapping.get(image_type) i['isPublic'] = image.get('is_public') == True i['architecture'] = image['properties'].get('architecture') properties = image['properties'] root_device_name = block_device.properties_root_device_name(properties) root_device_type = 'instance-store' for bdm in properties.get('block_device_mapping', []): if (bdm.get('device_name') == root_device_name and ('snapshot_id' in bdm or 'volume_id' in bdm) and not bdm.get('no_device')): root_device_type = 'ebs' i['rootDeviceName'] = (root_device_name or _DEFAULT_ROOT_DEVICE_NAME) i['rootDeviceType'] = root_device_type _format_mappings(properties, i) return i def describe_images(self, context, image_id=None, **kwargs): # NOTE: image_id is a list! if image_id: images = [] for ec2_id in image_id: try: image = self._get_image(context, ec2_id) except exception.NotFound: raise exception.ImageNotFound(image_id=ec2_id) images.append(image) else: images = self.image_service.detail(context) images = [self._format_image(i) for i in images] return {'imagesSet': images} def deregister_image(self, context, image_id, **kwargs): LOG.audit(_("De-registering image %s"), image_id, context=context) image = self._get_image(context, image_id) internal_id = image['id'] self.image_service.delete(context, internal_id) return {'imageId': image_id} def _register_image(self, context, metadata): image = self.image_service.create(context, metadata) image_type = self._image_type(image.get('container_format')) image_id = self.image_ec2_id(image['id'], image_type) return image_id def register_image(self, context, image_location=None, **kwargs): if image_location is None and 'name' in kwargs: image_location = kwargs['name'] metadata = {'properties': {'image_location': image_location}} if 'root_device_name' in kwargs: metadata['properties']['root_device_name'] = \ kwargs.get('root_device_name') mappings = [_parse_block_device_mapping(bdm) for bdm in kwargs.get('block_device_mapping', [])] if mappings: metadata['properties']['block_device_mapping'] = mappings image_id = self._register_image(context, metadata) msg = _("Registered image %(image_location)s with" " id %(image_id)s") % locals() LOG.audit(msg, context=context) return {'imageId': image_id} def describe_image_attribute(self, context, image_id, attribute, **kwargs): def _block_device_mapping_attribute(image, result): _format_mappings(image['properties'], result) def _launch_permission_attribute(image, result): result['launchPermission'] = [] if image['is_public']: result['launchPermission'].append({'group': 'all'}) def _root_device_name_attribute(image, result): result['rootDeviceName'] = \ block_device.properties_root_device_name(image['properties']) if result['rootDeviceName'] is None: result['rootDeviceName'] = _DEFAULT_ROOT_DEVICE_NAME supported_attributes = { 'blockDeviceMapping': _block_device_mapping_attribute, 'launchPermission': _launch_permission_attribute, 'rootDeviceName': _root_device_name_attribute, } fn = supported_attributes.get(attribute) if fn is None: raise exception.ApiError(_('attribute not supported: %s') % attribute) try: image = self._get_image(context, image_id) except exception.NotFound: raise exception.ImageNotFound(image_id=image_id) result = {'imageId': image_id} 
fn(image, result) return result def modify_image_attribute(self, context, image_id, attribute, operation_type, **kwargs): # TODO(devcamcar): Support users and groups other than 'all'. if attribute != 'launchPermission': raise exception.ApiError(_('attribute not supported: %s') % attribute) if not 'user_group' in kwargs: raise exception.ApiError(_('user or group not specified')) if len(kwargs['user_group']) != 1 or kwargs['user_group'][0] != 'all': raise exception.ApiError(_('only group "all" is supported')) if not operation_type in ['add', 'remove']: raise exception.ApiError(_('operation_type must be add or remove')) LOG.audit(_("Updating image %s publicity"), image_id, context=context) try: image = self._get_image(context, image_id) except exception.NotFound: raise exception.ImageNotFound(image_id=image_id) internal_id = image['id'] del(image['id']) image['is_public'] = (operation_type == 'add') return self.image_service.update(context, internal_id, image) def update_image(self, context, image_id, **kwargs): internal_id = ec2utils.ec2_id_to_id(image_id) result = self.image_service.update(context, internal_id, dict(kwargs)) return result # TODO(yamahata): race condition # At the moment there is no way to prevent others from # manipulating instances/volumes/snapshots. # As other code doesn't take it into consideration, here we don't # care about it for now. Ostrich algorithm def create_image(self, context, instance_id, **kwargs): # NOTE(yamahata): name/description are ignored by register_image(), # do so here no_reboot = kwargs.get('no_reboot', False) ec2_instance_id = instance_id instance_id = ec2utils.ec2_id_to_id(ec2_instance_id) instance = self.compute_api.get(context, instance_id) # stop the instance if necessary restart_instance = False if not no_reboot: vm_state = instance['vm_state'] # if the instance is in subtle state, refuse to proceed. if vm_state not in (vm_states.ACTIVE, vm_states.STOPPED): raise exception.InstanceNotRunning(instance_id=ec2_instance_id) if vm_state == vm_states.ACTIVE: restart_instance = True self.compute_api.stop(context, instance_id=instance_id) # wait for the instance to be really stopped start_time = time.time() while vm_state != vm_states.STOPPED: time.sleep(1) instance = self.compute_api.get(context, instance_id) vm_state = instance['vm_state'] # NOTE(yamahata): timeout and error. 1 hour for now for safety. # Is it too short/long? # Or is there any better way? timeout = 1 * 60 * 60 if time.time() > start_time + timeout: raise exception.ApiError( _('Couldn\'t stop instance within %d sec') % timeout) src_image = self._get_image(context, instance['image_ref']) properties = src_image['properties'] if instance['root_device_name']: properties['root_device_name'] = instance['root_device_name'] mapping = [] bdms = db.block_device_mapping_get_all_by_instance(context, instance_id) for bdm in bdms: if bdm.no_device: continue m = {} for attr in ('device_name', 'snapshot_id', 'volume_id', 'volume_size', 'delete_on_termination', 'no_device', 'virtual_name'): val = getattr(bdm, attr) if val is not None: m[attr] = val volume_id = m.get('volume_id') if m.get('snapshot_id') and volume_id: # create snapshot based on volume_id vol = self.volume_api.get(context, volume_id=volume_id) # NOTE(yamahata): Should we wait for snapshot creation? # Linux LVM snapshot creation completes in # short time, it doesn't matter for now.
snapshot = self.volume_api.create_snapshot_force( context, volume_id=volume_id, name=vol['display_name'], description=vol['display_description']) m['snapshot_id'] = snapshot['id'] del m['volume_id'] if m: mapping.append(m) for m in _properties_get_mappings(properties): virtual_name = m['virtual'] if virtual_name in ('ami', 'root'): continue assert block_device.is_swap_or_ephemeral(virtual_name) device_name = m['device'] if device_name in [b['device_name'] for b in mapping if not b.get('no_device', False)]: continue # NOTE(yamahata): swap and ephemeral devices are specified in # AMI, but disabled for this instance by user. # So disable those device by no_device. mapping.append({'device_name': device_name, 'no_device': True}) if mapping: properties['block_device_mapping'] = mapping for attr in ('status', 'location', 'id'): src_image.pop(attr, None) image_id = self._register_image(context, src_image) if restart_instance: self.compute_api.start(context, instance_id=instance_id) return {'imageId': image_id}
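# Illustrative sketch of the EC2-style id round-trip used by image_ec2_id()
# above. The real conversions live in ec2utils (not shown in this file), so
# the _sketch_* helpers below are hypothetical stand-ins that only assume the
# '<type>-%08x' template visible in image_ec2_id().
def _sketch_id_to_ec2_id(internal_id, template='i-%08x'):
    # e.g. 10 -> 'i-0000000a', or 'ami-0000000a' with template='ami-%08x'
    return template % int(internal_id)


def _sketch_ec2_id_to_id(ec2_id):
    # e.g. 'ami-0000000a' -> 10; raises ValueError on malformed input
    return int(ec2_id.split('-')[-1], 16)


assert _sketch_ec2_id_to_id(_sketch_id_to_ec2_id(10, 'ami-%08x')) == 10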
./CrossVul/dataset_final_sorted/CWE-264/py/good_3633_0
crossvul-python_data_bad_3693_3
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Main entry point into the Token service.""" import datetime from keystone import config from keystone import exception from keystone.common import manager CONF = config.CONF config.register_int('expiration', group='token', default=86400) class Manager(manager.Manager): """Default pivot point for the Token backend. See :mod:`keystone.common.manager.Manager` for more details on how this dynamically calls the backend. """ def __init__(self): super(Manager, self).__init__(CONF.token.driver) class Driver(object): """Interface description for a Token driver.""" def get_token(self, token_id): """Get a token by id. :param token_id: identity of the token :type token_id: string :returns: token_ref :raises: keystone.exception.TokenNotFound """ raise exception.NotImplemented() def create_token(self, token_id, data): """Create a token by id and data. :param token_id: identity of the token :type token_id: string :param data: dictionary with additional reference information :: { expires='' id=token_id, user=user_ref, tenant=tenant_ref, metadata=metadata_ref } :type data: dict :returns: token_ref or None. """ raise exception.NotImplemented() def delete_token(self, token_id): """Deletes a token by id. :param token_id: identity of the token :type token_id: string :returns: None. :raises: keystone.exception.TokenNotFound """ raise exception.NotImplemented() def _get_default_expire_time(self): """Determine when a token should expire based on the config. :returns: a naive utc datetime.datetime object """ expire_delta = datetime.timedelta(seconds=CONF.token.expiration) return datetime.datetime.utcnow() + expire_delta
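# Illustrative sketch of how a backend might satisfy the Driver interface
# above, assuming a plain dict for storage. This is not the shape of any real
# keystone backend; only _get_default_expire_time() and
# exception.TokenNotFound come from the module itself.
class SketchInMemoryDriver(Driver):
    def __init__(self):
        self._tokens = {}

    def get_token(self, token_id):
        try:
            return self._tokens[token_id]
        except KeyError:
            raise exception.TokenNotFound(token_id=token_id)

    def create_token(self, token_id, data):
        data = dict(data)
        # fall back to the configured expiration window
        data.setdefault('expires', self._get_default_expire_time())
        self._tokens[token_id] = data
        return data

    def delete_token(self, token_id):
        if self._tokens.pop(token_id, None) is None:
            raise exception.TokenNotFound(token_id=token_id)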
./CrossVul/dataset_final_sorted/CWE-264/py/bad_3693_3
crossvul-python_data_good_3785_0
# Copyright 2012 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import datetime import json import re import urllib import webob.exc from glance.api import policy import glance.api.v2 as v2 from glance.common import exception from glance.common import utils from glance.common import wsgi import glance.db import glance.notifier from glance.openstack.common import cfg import glance.openstack.common.log as logging from glance.openstack.common import timeutils import glance.schema import glance.store LOG = logging.getLogger(__name__) CONF = cfg.CONF class ImagesController(object): def __init__(self, db_api=None, policy_enforcer=None, notifier=None, store_api=None): self.db_api = db_api or glance.db.get_api() self.db_api.configure_db() self.policy = policy_enforcer or policy.Enforcer() self.notifier = notifier or glance.notifier.Notifier() self.store_api = store_api or glance.store self.store_api.create_stores() def _enforce(self, req, action): """Authorize an action against our policies""" try: self.policy.enforce(req.context, action, {}) except exception.Forbidden: raise webob.exc.HTTPForbidden() def _normalize_properties(self, image): """Convert the properties from the stored format to a dict The db api returns a list of dicts that look like {'name': <key>, 'value': <value>}, while it expects a format like {<key>: <value>} in image create and update calls. This function takes the extra step that the db api should be responsible for in the image get calls. The db api will also return deleted image properties that must be filtered out. 
""" properties = [(p['name'], p['value']) for p in image['properties'] if not p['deleted']] image['properties'] = dict(properties) return image def _extract_tags(self, image): try: #NOTE(bcwaldon): cast to set to make the list unique, then # cast back to list since that's a more sane response type return list(set(image.pop('tags'))) except KeyError: pass def _append_tags(self, context, image): image['tags'] = self.db_api.image_tag_get_all(context, image['id']) return image @utils.mutating def create(self, req, image): self._enforce(req, 'add_image') is_public = image.get('is_public') if is_public: self._enforce(req, 'publicize_image') image['owner'] = req.context.owner image['status'] = 'queued' tags = self._extract_tags(image) image = dict(self.db_api.image_create(req.context, image)) if tags is not None: self.db_api.image_tag_set_all(req.context, image['id'], tags) image['tags'] = tags else: image['tags'] = [] v2.update_image_read_acl(req, self.store_api, self.db_api, image) image = self._normalize_properties(dict(image)) self.notifier.info('image.update', image) return image def index(self, req, marker=None, limit=None, sort_key='created_at', sort_dir='desc', filters={}): self._enforce(req, 'get_images') filters['deleted'] = False #NOTE(bcwaldon): is_public=True gets public images and those # owned by the authenticated tenant result = {} filters.setdefault('is_public', True) if limit is None: limit = CONF.limit_param_default limit = min(CONF.api_limit_max, limit) try: images = self.db_api.image_get_all(req.context, filters=filters, marker=marker, limit=limit, sort_key=sort_key, sort_dir=sort_dir) if len(images) != 0 and len(images) == limit: result['next_marker'] = images[-1]['id'] except exception.InvalidFilterRangeValue as e: raise webob.exc.HTTPBadRequest(explanation=unicode(e)) except exception.InvalidSortKey as e: raise webob.exc.HTTPBadRequest(explanation=unicode(e)) except exception.NotFound as e: raise webob.exc.HTTPBadRequest(explanation=unicode(e)) images = [self._normalize_properties(dict(image)) for image in images] result['images'] = [self._append_tags(req.context, image) for image in images] return result def _get_image(self, context, image_id): try: return self.db_api.image_get(context, image_id) except (exception.NotFound, exception.Forbidden): raise webob.exc.HTTPNotFound() def show(self, req, image_id): self._enforce(req, 'get_image') image = self._get_image(req.context, image_id) image = self._normalize_properties(dict(image)) return self._append_tags(req.context, image) @utils.mutating def update(self, req, image_id, changes): self._enforce(req, 'modify_image') context = req.context try: image = self.db_api.image_get(context, image_id) except (exception.NotFound, exception.Forbidden): msg = ("Failed to find image %(image_id)s to update" % locals()) LOG.info(msg) raise webob.exc.HTTPNotFound(explanation=msg) image = self._normalize_properties(dict(image)) updates = self._extract_updates(req, image, changes) tags = None if len(updates) > 0: tags = self._extract_tags(updates) purge_props = 'properties' in updates try: image = self.db_api.image_update(context, image_id, updates, purge_props) except (exception.NotFound, exception.Forbidden): raise webob.exc.HTTPNotFound() image = self._normalize_properties(dict(image)) v2.update_image_read_acl(req, self.store_api, self.db_api, image) if tags is not None: self.db_api.image_tag_set_all(req.context, image_id, tags) image['tags'] = tags else: self._append_tags(req.context, image) self.notifier.info('image.update', image) 
return image def _extract_updates(self, req, image, changes): """ Determine the updates to pass to the database api. Given the current image, convert a list of changes to be made into the corresponding update dictionary that should be passed to db_api.image_update. Changes have the following parts op - 'add' a new attribute, 'replace' an existing attribute, or 'remove' an existing attribute. path - A list of path parts for determining which attribute the operation applies to. value - For 'add' and 'replace', the new value the attribute should assume. For the current use case, there are two types of valid paths. For base attributes (fields stored directly on the Image object) the path must take the form ['<attribute name>']. These attributes are always present so the only valid operation on them is 'replace'. For image properties, the path takes the form ['properties', '<property name>'] and all operations are valid. Future refactoring should simplify this code by hardening the image abstraction such that database details such as how image properties are stored do not have any influence here. """ updates = {} property_updates = image['properties'] for change in changes: path = change['path'] if len(path) == 1: assert change['op'] == 'replace' key = change['path'][0] if key == 'is_public' and change['value']: self._enforce(req, 'publicize_image') updates[key] = change['value'] else: assert len(path) == 2 assert path[0] == 'properties' update_method_name = '_do_%s_property' % change['op'] assert hasattr(self, update_method_name) update_method = getattr(self, update_method_name) update_method(property_updates, change) updates['properties'] = property_updates return updates def _do_replace_property(self, updates, change): """ Replace a single image property, ensuring it's present. """ key = change['path'][1] if key not in updates: msg = _("Property %s does not exist.") raise webob.exc.HTTPConflict(msg % key) updates[key] = change['value'] def _do_add_property(self, updates, change): """ Add a new image property, ensuring it does not already exist. """ key = change['path'][1] if key in updates: msg = _("Property %s already present.") raise webob.exc.HTTPConflict(msg % key) updates[key] = change['value'] def _do_remove_property(self, updates, change): """ Remove an image property, ensuring it's present.
""" key = change['path'][1] if key not in updates: msg = _("Property %s does not exist.") raise webob.exc.HTTPConflict(msg % key) del updates[key] @utils.mutating def delete(self, req, image_id): self._enforce(req, 'delete_image') image = self._get_image(req.context, image_id) if image['protected']: msg = _("Unable to delete as image %(image_id)s is protected" % locals()) raise webob.exc.HTTPForbidden(explanation=msg) if image['location'] and CONF.delayed_delete: status = 'pending_delete' else: status = 'deleted' try: self.db_api.image_update(req.context, image_id, {'status': status}) self.db_api.image_destroy(req.context, image_id) if image['location']: if CONF.delayed_delete: self.store_api.schedule_delayed_delete_from_backend( image['location'], id) else: self.store_api.safe_delete_from_backend(image['location'], req.context, id) except (exception.NotFound, exception.Forbidden): msg = ("Failed to find image %(image_id)s to delete" % locals()) LOG.info(msg) raise webob.exc.HTTPNotFound() else: self.notifier.info('image.delete', image) class RequestDeserializer(wsgi.JSONRequestDeserializer): _readonly_properties = ['created_at', 'updated_at', 'status', 'checksum', 'size', 'direct_url', 'self', 'file', 'schema'] _reserved_properties = ['owner', 'is_public', 'location', 'deleted', 'deleted_at'] _base_properties = ['checksum', 'created_at', 'container_format', 'disk_format', 'id', 'min_disk', 'min_ram', 'name', 'size', 'status', 'tags', 'updated_at', 'visibility', 'protected'] def __init__(self, schema=None): super(RequestDeserializer, self).__init__() self.schema = schema or get_schema() def _parse_image(self, request): body = self._get_request_body(request) try: self.schema.validate(body) except exception.InvalidObject as e: raise webob.exc.HTTPBadRequest(explanation=unicode(e)) # Ensure all specified properties are allowed self._check_readonly(body) self._check_reserved(body) # Create a dict of base image properties, with user- and deployer- # defined properties contained in a 'properties' dictionary image = {'properties': body} for key in self._base_properties: try: image[key] = image['properties'].pop(key) except KeyError: pass if 'visibility' in image: image['is_public'] = image.pop('visibility') == 'public' return {'image': image} def _get_request_body(self, request): output = super(RequestDeserializer, self).default(request) if not 'body' in output: msg = _('Body expected in request.') raise webob.exc.HTTPBadRequest(explanation=msg) return output['body'] @classmethod def _check_readonly(cls, image): for key in cls._readonly_properties: if key in image: msg = "Attribute \'%s\' is read-only." % key raise webob.exc.HTTPForbidden(explanation=unicode(msg)) @classmethod def _check_reserved(cls, image): for key in cls._reserved_properties: if key in image: msg = "Attribute \'%s\' is reserved." 
% key raise webob.exc.HTTPForbidden(explanation=unicode(msg)) def create(self, request): return self._parse_image(request) def _get_change_operation(self, raw_change): op = None for key in ['replace', 'add', 'remove']: if key in raw_change: if op is not None: msg = _('Operation objects must contain only one member' ' named "add", "remove", or "replace".') raise webob.exc.HTTPBadRequest(explanation=msg) op = key if op is None: msg = _('Operation objects must contain exactly one member' ' named "add", "remove", or "replace".') raise webob.exc.HTTPBadRequest(explanation=msg) return op def _get_change_path(self, raw_change, op): key = self._decode_json_pointer(raw_change[op]) if key in self._readonly_properties: msg = "Attribute \'%s\' is read-only." % key raise webob.exc.HTTPForbidden(explanation=unicode(msg)) if key in self._reserved_properties: msg = "Attribute \'%s\' is reserved." % key raise webob.exc.HTTPForbidden(explanation=unicode(msg)) # For image properties, we need to put "properties" at the beginning if key not in self._base_properties: return ['properties', key] return [key] def _decode_json_pointer(self, pointer): """ Parse a json pointer. Json Pointers are defined in http://tools.ietf.org/html/draft-pbryan-zyp-json-pointer . The pointers use '/' for separation between object attributes, such that '/A/B' would evaluate to C in {"A": {"B": "C"}}. A '/' character in an attribute name is encoded as "~1" and a '~' character is encoded as "~0". """ self._validate_json_pointer(pointer) return pointer.lstrip('/').replace('~1', '/').replace('~0', '~') def _validate_json_pointer(self, pointer): """ Validate a json pointer. We only accept a limited form of json pointers. Specifically, we do not allow multiple levels of indirection, so there can only be one '/' in the pointer, located at the start of the string. """ if not pointer.startswith('/'): msg = _('Pointer `%s` does not start with "/".' % pointer) raise webob.exc.HTTPBadRequest(explanation=msg) if '/' in pointer[1:]: msg = _('Pointer `%s` contains more than one "/".' % pointer) raise webob.exc.HTTPBadRequest(explanation=msg) if re.match('~[^01]', pointer): msg = _('Pointer `%s` contains "~" not part of' ' a recognized escape sequence.' 
% pointer) raise webob.exc.HTTPBadRequest(explanation=msg) def _get_change_value(self, raw_change, op): if 'value' not in raw_change: msg = _('Operation "%s" requires a member named "value".') raise webob.exc.HTTPBadRequest(explanation=msg % op) return raw_change['value'] def _validate_change(self, change): if change['op'] == 'remove': return partial_image = {change['path'][-1]: change['value']} try: self.schema.validate(partial_image) except exception.InvalidObject as e: raise webob.exc.HTTPBadRequest(explanation=unicode(e)) def update(self, request): changes = [] valid_content_types = [ 'application/openstack-images-v2.0-json-patch' ] if request.content_type not in valid_content_types: headers = {'Accept-Patch': ','.join(valid_content_types)} raise webob.exc.HTTPUnsupportedMediaType(headers=headers) body = self._get_request_body(request) if not isinstance(body, list): msg = _('Request body must be a JSON array of operation objects.') raise webob.exc.HTTPBadRequest(explanation=msg) for raw_change in body: if not isinstance(raw_change, dict): msg = _('Operations must be JSON objects.') raise webob.exc.HTTPBadRequest(explanation=msg) op = self._get_change_operation(raw_change) path = self._get_change_path(raw_change, op) change = {'op': op, 'path': path} if not op == 'remove': change['value'] = self._get_change_value(raw_change, op) self._validate_change(change) if change['path'] == ['visibility']: change['path'] = ['is_public'] change['value'] = change['value'] == 'public' changes.append(change) return {'changes': changes} def _validate_limit(self, limit): try: limit = int(limit) except ValueError: msg = _("limit param must be an integer") raise webob.exc.HTTPBadRequest(explanation=msg) if limit < 0: msg = _("limit param must be non-negative") raise webob.exc.HTTPBadRequest(explanation=msg) return limit def _validate_sort_dir(self, sort_dir): if sort_dir not in ['asc', 'desc']: msg = _('Invalid sort direction: %s' % sort_dir) raise webob.exc.HTTPBadRequest(explanation=msg) return sort_dir def _get_filters(self, filters): visibility = filters.pop('visibility', None) if visibility: if visibility in ['public', 'private']: filters['is_public'] = visibility == 'public' else: msg = _('Invalid visibility value: %s') % visibility raise webob.exc.HTTPBadRequest(explanation=msg) return filters def index(self, request): params = request.params.copy() limit = params.pop('limit', None) marker = params.pop('marker', None) sort_dir = params.pop('sort_dir', 'desc') query_params = { 'sort_key': params.pop('sort_key', 'created_at'), 'sort_dir': self._validate_sort_dir(sort_dir), 'filters': self._get_filters(params), } if marker is not None: query_params['marker'] = marker if limit is not None: query_params['limit'] = self._validate_limit(limit) return query_params class ResponseSerializer(wsgi.JSONResponseSerializer): def __init__(self, schema=None): super(ResponseSerializer, self).__init__() self.schema = schema or get_schema() def _get_image_href(self, image, subcollection=''): base_href = '/v2/images/%s' % image['id'] if subcollection: base_href = '%s/%s' % (base_href, subcollection) return base_href def _get_image_links(self, image): return [ {'rel': 'self', 'href': self._get_image_href(image)}, {'rel': 'file', 'href': self._get_image_href(image, 'file')}, {'rel': 'describedby', 'href': '/v2/schemas/image'}, ] def _format_image(self, image): #NOTE(bcwaldon): merge the contained properties dict with the # top-level image object image_view = image['properties'] attributes = ['id', 'name', 'disk_format',
'container_format', 'size', 'status', 'checksum', 'tags', 'protected', 'created_at', 'updated_at', 'min_ram', 'min_disk'] for key in attributes: image_view[key] = image[key] location = image['location'] if CONF.show_image_direct_url and location is not None: image_view['direct_url'] = location visibility = 'public' if image['is_public'] else 'private' image_view['visibility'] = visibility image_view['self'] = self._get_image_href(image) image_view['file'] = self._get_image_href(image, 'file') image_view['schema'] = '/v2/schemas/image' self._serialize_datetimes(image_view) image_view = self.schema.filter(image_view) return image_view @staticmethod def _serialize_datetimes(image): for (key, value) in image.iteritems(): if isinstance(value, datetime.datetime): image[key] = timeutils.isotime(value) def create(self, response, image): response.status_int = 201 body = json.dumps(self._format_image(image), ensure_ascii=False) response.unicode_body = unicode(body) response.content_type = 'application/json' response.location = self._get_image_href(image) def show(self, response, image): body = json.dumps(self._format_image(image), ensure_ascii=False) response.unicode_body = unicode(body) response.content_type = 'application/json' def update(self, response, image): body = json.dumps(self._format_image(image), ensure_ascii=False) response.unicode_body = unicode(body) response.content_type = 'application/json' def index(self, response, result): params = dict(response.request.params) params.pop('marker', None) query = urllib.urlencode(params) body = { 'images': [self._format_image(i) for i in result['images']], 'first': '/v2/images', 'schema': '/v2/schemas/images', } if query: body['first'] = '%s?%s' % (body['first'], query) if 'next_marker' in result: params['marker'] = result['next_marker'] next_query = urllib.urlencode(params) body['next'] = '/v2/images?%s' % next_query response.unicode_body = unicode(json.dumps(body, ensure_ascii=False)) response.content_type = 'application/json' def delete(self, response, result): response.status_int = 204 _BASE_PROPERTIES = { 'id': { 'type': 'string', 'description': 'An identifier for the image', 'pattern': ('^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}' '-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$'), }, 'name': { 'type': 'string', 'description': 'Descriptive name for the image', 'maxLength': 255, }, 'status': { 'type': 'string', 'description': 'Status of the image', 'enum': ['queued', 'saving', 'active', 'killed', 'deleted', 'pending_delete'], }, 'visibility': { 'type': 'string', 'description': 'Scope of image accessibility', 'enum': ['public', 'private'], }, 'protected': { 'type': 'boolean', 'description': 'If true, image will not be deletable.', }, 'checksum': { 'type': 'string', 'description': 'md5 hash of image contents.', 'maxLength': 32, }, 'size': { 'type': 'integer', 'description': 'Size of image file in bytes', }, 'container_format': { 'type': 'string', 'description': '', 'enum': ['bare', 'ovf', 'ami', 'aki', 'ari'], }, 'disk_format': { 'type': 'string', 'description': '', 'enum': ['raw', 'vhd', 'vmdk', 'vdi', 'iso', 'qcow2', 'aki', 'ari', 'ami'], }, 'created_at': { 'type': 'string', 'description': 'Date and time of image registration', #TODO(bcwaldon): our jsonschema library doesn't seem to like the # format attribute, figure out why!
#'format': 'date-time', }, 'updated_at': { 'type': 'string', 'description': 'Date and time of the last image modification', #'format': 'date-time', }, 'tags': { 'type': 'array', 'description': 'List of strings related to the image', 'items': { 'type': 'string', 'maxLength': 255, }, }, 'direct_url': { 'type': 'string', 'description': 'URL to access the image file kept in external store', }, 'min_ram': { 'type': 'integer', 'description': 'Amount of ram (in MB) required to boot image.', }, 'min_disk': { 'type': 'integer', 'description': 'Amount of disk space (in GB) required to boot image.', }, 'self': {'type': 'string'}, 'file': {'type': 'string'}, 'schema': {'type': 'string'}, } _BASE_LINKS = [ {'rel': 'self', 'href': '{self}'}, {'rel': 'enclosure', 'href': '{file}'}, {'rel': 'describedby', 'href': '{schema}'}, ] def get_schema(custom_properties=None): properties = copy.deepcopy(_BASE_PROPERTIES) links = copy.deepcopy(_BASE_LINKS) if CONF.allow_additional_image_properties: schema = glance.schema.PermissiveSchema('image', properties, links) else: schema = glance.schema.Schema('image', properties) schema.merge_properties(custom_properties or {}) return schema def get_collection_schema(custom_properties=None): image_schema = get_schema(custom_properties) return glance.schema.CollectionSchema('images', image_schema) def load_custom_properties(): """Find the schema properties files and load them into a dict.""" filename = 'schema-image.json' match = CONF.find_file(filename) if match: schema_file = open(match) schema_data = schema_file.read() return json.loads(schema_data) else: msg = _('Could not find schema properties file %s. Continuing ' 'without custom properties') LOG.warn(msg % filename) return {} def create_resource(custom_properties=None): """Images resource factory method""" schema = get_schema(custom_properties) deserializer = RequestDeserializer(schema) serializer = ResponseSerializer(schema) controller = ImagesController() return wsgi.Resource(controller, deserializer, serializer)
./CrossVul/dataset_final_sorted/CWE-264/py/good_3785_0
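The update() deserializer in the file above accepts a JSON-patch style body under the 'application/openstack-images-v2.0-json-patch' content type and rewrites 'visibility' operations onto the internal 'is_public' field. A minimal standalone sketch of that translation (translate_changes is a hypothetical helper written for illustration, not part of the file above, and it simplifies the path handling):

import json

def translate_changes(body):
    # Mirror the loop in RequestDeserializer.update(): keep op/path,
    # fetch 'value' for everything except 'remove', and map the public
    # 'visibility' attribute onto the internal 'is_public' boolean.
    changes = []
    for raw_change in body:
        change = {'op': raw_change['op'],
                  'path': [raw_change['path'].lstrip('/')]}
        if change['op'] != 'remove':
            change['value'] = raw_change['value']
            if change['path'] == ['visibility']:
                change['path'] = ['is_public']
                change['value'] = change['value'] == 'public'
        changes.append(change)
    return changes

patch = json.loads('[{"op": "replace", "path": "/visibility", '
                   '"value": "public"}]')
assert translate_changes(patch) == [
    {'op': 'replace', 'path': ['is_public'], 'value': True}]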
crossvul-python_data_bad_2042_0
from django.core.exceptions import SuspiciousOperation


class DisallowedModelAdminLookup(SuspiciousOperation):
    """An invalid filter was passed to the admin view via the URL querystring."""
    pass
./CrossVul/dataset_final_sorted/CWE-264/py/bad_2042_0
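A brief usage sketch of the exception above: Django's admin changelist validates querystring lookups and raises this SuspiciousOperation subclass for anything not explicitly allowed. The helper below is illustrative only (it assumes the module above is importable and that model_admin exposes a lookup_allowed(lookup, value) check), not the class's actual call site:

def check_lookup(model_admin, lookup, value):
    # Reject filters that the ModelAdmin has not whitelisted.
    if not model_admin.lookup_allowed(lookup, value):
        raise DisallowedModelAdminLookup(
            "Filtering by %s not allowed" % lookup)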
crossvul-python_data_good_3695_2
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy
import datetime

from keystone.common import kvs
from keystone import exception
from keystone import token


class Token(kvs.Base, token.Driver):
    # Public interface
    def get_token(self, token_id):
        # Use token_ref for the local so the imported token module
        # is not shadowed inside this method.
        token_ref = self.db.get('token-%s' % token_id)
        if (token_ref and
                (token_ref['expires'] is None or
                 token_ref['expires'] > datetime.datetime.utcnow())):
            return token_ref
        else:
            raise exception.TokenNotFound(token_id=token_id)

    def create_token(self, token_id, data):
        data_copy = copy.deepcopy(data)
        if 'expires' not in data:
            data_copy['expires'] = self._get_default_expire_time()
        self.db.set('token-%s' % token_id, data_copy)
        return copy.deepcopy(data_copy)

    def delete_token(self, token_id):
        try:
            return self.db.delete('token-%s' % token_id)
        except KeyError:
            raise exception.TokenNotFound(token_id=token_id)

    def list_tokens(self, user_id):
        tokens = []
        now = datetime.datetime.utcnow()
        for key, token_ref in self.db.items():
            if not key.startswith('token-'):
                continue
            if 'user' not in token_ref:
                continue
            if token_ref['user'].get('id') != user_id:
                continue
            if token_ref.get('expires') and token_ref.get('expires') < now:
                continue
            tokens.append(key.split('-', 1)[1])
        return tokens
./CrossVul/dataset_final_sorted/CWE-264/py/good_3695_2
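A round-trip sketch of the KVS token driver above, using a plain dict as a stand-in backend. DictBackend and the direct attribute assignment are test-double assumptions for illustration; in Keystone the backend comes from kvs.Base, so the commented lines may need adapting:

import datetime

class DictBackend(object):
    """Minimal in-memory stand-in for the kvs backend used by Token."""
    def __init__(self):
        self._data = {}

    def get(self, key):
        return self._data.get(key)

    def set(self, key, value):
        self._data[key] = value

    def delete(self, key):
        del self._data[key]

    def items(self):
        return self._data.items()

# driver = Token()
# driver.db = DictBackend()
# expires = datetime.datetime.utcnow() + datetime.timedelta(hours=1)
# driver.create_token('abc', {'user': {'id': 'u1'}, 'expires': expires})
# assert driver.list_tokens('u1') == ['abc']
# driver.delete_token('abc')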
crossvul-python_data_good_5539_1
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Nova base exception handling. Includes decorator for re-raising Nova-type exceptions. SHOULD include dedicated exception logging. """ import functools import itertools import webob.exc from nova.openstack.common import excutils from nova.openstack.common import log as logging LOG = logging.getLogger(__name__) class ConvertedException(webob.exc.WSGIHTTPException): def __init__(self, code=0, title="", explanation=""): self.code = code self.title = title self.explanation = explanation super(ConvertedException, self).__init__() class ProcessExecutionError(IOError): def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None, description=None): self.exit_code = exit_code self.stderr = stderr self.stdout = stdout self.cmd = cmd self.description = description if description is None: description = _('Unexpected error while running command.') if exit_code is None: exit_code = '-' message = _('%(description)s\nCommand: %(cmd)s\n' 'Exit code: %(exit_code)s\nStdout: %(stdout)r\n' 'Stderr: %(stderr)r') % locals() IOError.__init__(self, message) def wrap_db_error(f): def _wrap(*args, **kwargs): try: return f(*args, **kwargs) except UnicodeEncodeError: raise InvalidUnicodeParameter() except Exception, e: LOG.exception(_('DB exception wrapped.')) raise DBError(e) _wrap.func_name = f.func_name return _wrap def wrap_exception(notifier=None, publisher_id=None, event_type=None, level=None): """This decorator wraps a method to catch any exceptions that may get thrown. It logs the exception as well as optionally sending it to the notification system. """ # TODO(sandy): Find a way to import nova.notifier.api so we don't have # to pass it in as a parameter. Otherwise we get a cyclic import of # nova.notifier.api -> nova.utils -> nova.exception :( # TODO(johannes): Also, it would be nice to use # utils.save_and_reraise_exception() without an import loop def inner(f): def wrapped(*args, **kw): try: return f(*args, **kw) except Exception, e: with excutils.save_and_reraise_exception(): if notifier: payload = dict(args=args, exception=e) payload.update(kw) # Use a temp vars so we don't shadow # our outer definitions. temp_level = level if not temp_level: temp_level = notifier.ERROR temp_type = event_type if not temp_type: # If f has multiple decorators, they must use # functools.wraps to ensure the name is # propagated. temp_type = f.__name__ context = get_context_from_function_and_args(f, args, kw) notifier.notify(context, publisher_id, temp_type, temp_level, payload) return functools.wraps(f)(wrapped) return inner class NovaException(Exception): """Base Nova Exception To correctly use this class, inherit from it and define a 'message' property. That message will get printf'd with the keyword arguments provided to the constructor. 
""" message = _("An unknown exception occurred.") code = 500 headers = {} safe = False def __init__(self, message=None, **kwargs): self.kwargs = kwargs if 'code' not in self.kwargs: try: self.kwargs['code'] = self.code except AttributeError: pass if not message: try: message = self.message % kwargs except Exception as e: # kwargs doesn't match a variable in the message # log the issue and the kwargs LOG.exception(_('Exception in string format operation')) for name, value in kwargs.iteritems(): LOG.error("%s: %s" % (name, value)) # at least get the core message out if something happened message = self.message super(NovaException, self).__init__(message) class EC2APIError(NovaException): message = _("Unknown") def __init__(self, message=None, code=None): self.msg = message self.code = code outstr = '%s' % message super(EC2APIError, self).__init__(outstr) class DBError(NovaException): """Wraps an implementation specific exception.""" def __init__(self, inner_exception=None): self.inner_exception = inner_exception super(DBError, self).__init__(str(inner_exception)) class DeprecatedConfig(NovaException): message = _("Fatal call to deprecated config %(msg)s") class DecryptionFailure(NovaException): message = _("Failed to decrypt text") class VirtualInterfaceCreateException(NovaException): message = _("Virtual Interface creation failed") class VirtualInterfaceMacAddressException(NovaException): message = _("5 attempts to create virtual interface" "with unique mac address failed") class GlanceConnectionFailed(NovaException): message = _("Connection to glance host %(host)s:%(port)s failed: " "%(reason)s") class NotAuthorized(NovaException): message = _("Not authorized.") code = 403 class AdminRequired(NotAuthorized): message = _("User does not have admin privileges") class PolicyNotAuthorized(NotAuthorized): message = _("Policy doesn't allow %(action)s to be performed.") class ImageNotAuthorized(NovaException): message = _("Not authorized for image %(image_id)s.") class Invalid(NovaException): message = _("Unacceptable parameters.") code = 400 class InvalidSnapshot(Invalid): message = _("Invalid snapshot") + ": %(reason)s" class InvalidBDM(Invalid): message = _("Block Device Mapping is Invalid.") class InvalidBDMSnapshot(InvalidBDM): message = _("Block Device Mapping is Invalid: " "failed to get snapshot %(id)s.") class InvalidBDMVolume(InvalidBDM): message = _("Block Device Mapping is Invalid: " "failed to get volume %(id)s.") class VolumeUnattached(Invalid): message = _("Volume %(volume_id)s is not attached to anything") class VolumeAttached(Invalid): message = _("Volume %(volume_id)s is still attached, detach volume first.") class InvalidKeypair(Invalid): message = _("Keypair data is invalid") class SfJsonEncodeFailure(NovaException): message = _("Failed to load data into json format") class InvalidRequest(Invalid): message = _("The request is invalid.") class InvalidInput(Invalid): message = _("Invalid input received") + ": %(reason)s" class InvalidVolumeType(Invalid): message = _("Invalid volume type") + ": %(reason)s" class InvalidVolume(Invalid): message = _("Invalid volume") + ": %(reason)s" class InvalidMetadata(Invalid): message = _("Invalid metadata") + ": %(reason)s" class InvalidMetadataSize(Invalid): message = _("Invalid metadata size") + ": %(reason)s" class InvalidPortRange(Invalid): message = _("Invalid port range %(from_port)s:%(to_port)s. 
%(msg)s") class InvalidIpProtocol(Invalid): message = _("Invalid IP protocol %(protocol)s.") class InvalidContentType(Invalid): message = _("Invalid content type %(content_type)s.") class InvalidCidr(Invalid): message = _("Invalid cidr %(cidr)s.") class InvalidUnicodeParameter(Invalid): message = _("Invalid Parameter: " "Unicode is not supported by the current database.") # Cannot be templated as the error syntax varies. # msg needs to be constructed when raised. class InvalidParameterValue(Invalid): message = _("%(err)s") class InvalidAggregateAction(Invalid): message = _("Cannot perform action '%(action)s' on aggregate " "%(aggregate_id)s. Reason: %(reason)s.") class InvalidGroup(Invalid): message = _("Group not valid. Reason: %(reason)s") class InvalidSortKey(Invalid): message = _("Sort key supplied was not valid.") class InstanceInvalidState(Invalid): message = _("Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot " "%(method)s while the instance is in this state.") class InstanceNotRunning(Invalid): message = _("Instance %(instance_id)s is not running.") class InstanceNotInRescueMode(Invalid): message = _("Instance %(instance_id)s is not in rescue mode") class InstanceNotReady(Invalid): message = _("Instance %(instance_id)s is not ready") class InstanceSuspendFailure(Invalid): message = _("Failed to suspend instance") + ": %(reason)s" class InstanceResumeFailure(Invalid): message = _("Failed to resume server") + ": %(reason)s." class InstanceRebootFailure(Invalid): message = _("Failed to reboot instance") + ": %(reason)s" class InstanceTerminationFailure(Invalid): message = _("Failed to terminate instance") + ": %(reason)s" class ServiceUnavailable(Invalid): message = _("Service is unavailable at this time.") class ComputeResourcesUnavailable(ServiceUnavailable): message = _("Insufficient compute resources.") class ComputeServiceUnavailable(ServiceUnavailable): message = _("Compute service is unavailable at this time.") class UnableToMigrateToSelf(Invalid): message = _("Unable to migrate instance (%(instance_id)s) " "to current host (%(host)s).") class InvalidHypervisorType(Invalid): message = _("The supplied hypervisor type of is invalid.") class DestinationHypervisorTooOld(Invalid): message = _("The instance requires a newer hypervisor version than " "has been provided.") class DestinationDiskExists(Invalid): message = _("The supplied disk path (%(path)s) already exists, " "it is expected not to exist.") class InvalidDevicePath(Invalid): message = _("The supplied device path (%(path)s) is invalid.") class DevicePathInUse(Invalid): message = _("The supplied device path (%(path)s) is in use.") class DeviceIsBusy(Invalid): message = _("The supplied device (%(device)s) is busy.") class InvalidCPUInfo(Invalid): message = _("Unacceptable CPU info") + ": %(reason)s" class InvalidIpAddressError(Invalid): message = _("%(address)s is not a valid IP v4/6 address.") class InvalidVLANTag(Invalid): message = _("VLAN tag is not appropriate for the port group " "%(bridge)s. Expected VLAN tag is %(tag)s, " "but the one associated with the port group is %(pgroup)s.") class InvalidVLANPortGroup(Invalid): message = _("vSwitch which contains the port group %(bridge)s is " "not associated with the desired physical adapter. 
" "Expected vSwitch is %(expected)s, but the one associated " "is %(actual)s.") class InvalidDiskFormat(Invalid): message = _("Disk format %(disk_format)s is not acceptable") class ImageUnacceptable(Invalid): message = _("Image %(image_id)s is unacceptable: %(reason)s") class InstanceUnacceptable(Invalid): message = _("Instance %(instance_id)s is unacceptable: %(reason)s") class InvalidEc2Id(Invalid): message = _("Ec2 id %(ec2_id)s is unacceptable.") class InvalidUUID(Invalid): message = _("Expected a uuid but received %(uuid)s.") class ConstraintNotMet(NovaException): message = _("Constraint not met.") code = 412 class NotFound(NovaException): message = _("Resource could not be found.") code = 404 class VirtDriverNotFound(NotFound): message = _("Could not find driver for connection_type %(name)s") class PersistentVolumeFileNotFound(NotFound): message = _("Volume %(volume_id)s persistence file could not be found.") class VolumeNotFound(NotFound): message = _("Volume %(volume_id)s could not be found.") class SfAccountNotFound(NotFound): message = _("Unable to locate account %(account_name)s on " "Solidfire device") class VolumeMetadataNotFound(NotFound): message = _("Volume %(volume_id)s has no metadata with " "key %(metadata_key)s.") class VolumeTypeNotFound(NotFound): message = _("Volume type %(volume_type_id)s could not be found.") class VolumeTypeNotFoundByName(VolumeTypeNotFound): message = _("Volume type with name %(volume_type_name)s " "could not be found.") class VolumeTypeExtraSpecsNotFound(NotFound): message = _("Volume Type %(volume_type_id)s has no extra specs with " "key %(extra_specs_key)s.") class SnapshotNotFound(NotFound): message = _("Snapshot %(snapshot_id)s could not be found.") class VolumeIsBusy(NovaException): message = _("deleting volume %(volume_name)s that has snapshot") class SnapshotIsBusy(NovaException): message = _("deleting snapshot %(snapshot_name)s that has " "dependent volumes") class ISCSITargetNotFoundForVolume(NotFound): message = _("No target id found for volume %(volume_id)s.") class ISCSITargetCreateFailed(NovaException): message = _("Failed to create iscsi target for volume %(volume_id)s.") class ISCSITargetRemoveFailed(NovaException): message = _("Failed to remove iscsi target for volume %(volume_id)s.") class DiskNotFound(NotFound): message = _("No disk at %(location)s") class VolumeDriverNotFound(NotFound): message = _("Could not find a handler for %(driver_type)s volume.") class InvalidImageRef(Invalid): message = _("Invalid image href %(image_href)s.") class ImageNotFound(NotFound): message = _("Image %(image_id)s could not be found.") class ImageNotFoundEC2(ImageNotFound): message = _("Image %(image_id)s could not be found. The nova EC2 API " "assigns image ids dynamically when they are listed for the " "first time. 
Have you listed image ids since adding this " "image?") class ProjectNotFound(NotFound): message = _("Project %(project_id)s could not be found.") class StorageRepositoryNotFound(NotFound): message = _("Cannot find SR to read/write VDI.") class NetworkInUse(NovaException): message = _("Network %(network_id)s is still in use.") class NetworkNotCreated(NovaException): message = _("%(req)s is required to create a network.") class NetworkNotFound(NotFound): message = _("Network %(network_id)s could not be found.") class NetworkNotFoundForBridge(NetworkNotFound): message = _("Network could not be found for bridge %(bridge)s") class NetworkNotFoundForUUID(NetworkNotFound): message = _("Network could not be found for uuid %(uuid)s") class NetworkNotFoundForCidr(NetworkNotFound): message = _("Network could not be found with cidr %(cidr)s.") class NetworkNotFoundForInstance(NetworkNotFound): message = _("Network could not be found for instance %(instance_id)s.") class NoNetworksFound(NotFound): message = _("No networks defined.") class NetworkNotFoundForProject(NotFound): message = _("Either Network uuid %(network_uuid)s is not present or " "is not assigned to the project %(project_id)s.") class NetworkHostNotSet(NovaException): message = _("Host is not set to the network (%(network_id)s).") class DatastoreNotFound(NotFound): message = _("Could not find the datastore reference(s) which the VM uses.") class PortInUse(NovaException): message = _("Port %(port_id)s is still in use.") class PortNotFound(NotFound): message = _("Port %(port_id)s could not be found.") class FixedIpNotFound(NotFound): message = _("No fixed IP associated with id %(id)s.") class FixedIpNotFoundForAddress(FixedIpNotFound): message = _("Fixed ip not found for address %(address)s.") class FixedIpNotFoundForInstance(FixedIpNotFound): message = _("Instance %(instance_uuid)s has zero fixed ips.") class FixedIpNotFoundForNetworkHost(FixedIpNotFound): message = _("Network host %(host)s has zero fixed ips " "in network %(network_id)s.") class FixedIpNotFoundForSpecificInstance(FixedIpNotFound): message = _("Instance %(instance_uuid)s doesn't have fixed ip '%(ip)s'.") class FixedIpNotFoundForNetwork(FixedIpNotFound): message = _("Fixed IP address (%(address)s) does not exist in " "network (%(network_uuid)s).") class FixedIpAlreadyInUse(NovaException): message = _("Fixed IP address %(address)s is already in use on instance " "%(instance_uuid)s.") class FixedIpAssociatedWithMultipleInstances(NovaException): message = _("More than one instance is associated with fixed ip address " "'%(address)s'.") class FixedIpInvalid(Invalid): message = _("Fixed IP address %(address)s is invalid.") class NoMoreFixedIps(NovaException): message = _("Zero fixed ips available.") class NoFixedIpsDefined(NotFound): message = _("Zero fixed ips could be found.") #TODO(bcwaldon): EOL this exception! 
class Duplicate(NovaException):
    pass


class FloatingIpExists(Duplicate):
    message = _("Floating ip %(address)s already exists.")


class FloatingIpNotFound(NotFound):
    message = _("Floating ip not found for id %(id)s.")


class FloatingIpDNSExists(Invalid):
    message = _("The DNS entry %(name)s already exists in domain %(domain)s.")


class FloatingIpNotFoundForAddress(FloatingIpNotFound):
    message = _("Floating ip not found for address %(address)s.")


class FloatingIpNotFoundForHost(FloatingIpNotFound):
    message = _("Floating ip not found for host %(host)s.")


class FloatingIpMultipleFoundForAddress(NovaException):
    message = _("Multiple floating ips are found for address %(address)s.")


class FloatingIpPoolNotFound(NotFound):
    message = _("Floating ip pool not found.")
    safe = True


class NoMoreFloatingIps(FloatingIpNotFound):
    message = _("Zero floating ips available.")
    safe = True


class FloatingIpAssociated(NovaException):
    message = _("Floating ip %(address)s is associated.")


class FloatingIpNotAssociated(NovaException):
    message = _("Floating ip %(address)s is not associated.")


class NoFloatingIpsDefined(NotFound):
    message = _("Zero floating ips exist.")


class NoFloatingIpInterface(NotFound):
    message = _("Interface %(interface)s not found.")


class CannotDisassociateAutoAssignedFloatingIP(NovaException):
    message = _("Cannot disassociate auto assigned floating ip")


class KeypairNotFound(NotFound):
    message = _("Keypair %(name)s not found for user %(user_id)s")


class CertificateNotFound(NotFound):
    message = _("Certificate %(certificate_id)s not found.")


class ServiceNotFound(NotFound):
    message = _("Service %(service_id)s could not be found.")


class HostNotFound(NotFound):
    message = _("Host %(host)s could not be found.")


class ComputeHostNotFound(HostNotFound):
    message = _("Compute host %(host)s could not be found.")


class HostBinaryNotFound(NotFound):
    message = _("Could not find binary %(binary)s on host %(host)s.")


class InvalidReservationExpiration(Invalid):
    message = _("Invalid reservation expiration %(expire)s.")


class InvalidQuotaValue(Invalid):
    message = _("Change would make usage less than 0 for the following "
                "resources: %(unders)s")


class QuotaNotFound(NotFound):
    message = _("Quota could not be found")


class QuotaResourceUnknown(QuotaNotFound):
    message = _("Unknown quota resources %(unknown)s.")


class ProjectQuotaNotFound(QuotaNotFound):
    message = _("Quota for project %(project_id)s could not be found.")


class QuotaClassNotFound(QuotaNotFound):
    message = _("Quota class %(class_name)s could not be found.")


class QuotaUsageNotFound(QuotaNotFound):
    message = _("Quota usage for project %(project_id)s could not be found.")


class ReservationNotFound(QuotaNotFound):
    message = _("Quota reservation %(uuid)s could not be found.")


class OverQuota(NovaException):
    message = _("Quota exceeded for resources: %(overs)s")


class SecurityGroupNotFound(NotFound):
    message = _("Security group %(security_group_id)s not found.")


class SecurityGroupNotFoundForProject(SecurityGroupNotFound):
    message = _("Security group %(security_group_id)s not found "
                "for project %(project_id)s.")


class SecurityGroupNotFoundForRule(SecurityGroupNotFound):
    message = _("Security group with rule %(rule_id)s not found.")


class SecurityGroupExistsForInstance(Invalid):
    message = _("Security group %(security_group_id)s is already associated"
                " with the instance %(instance_id)s")


class SecurityGroupNotExistsForInstance(Invalid):
    message = _("Security group %(security_group_id)s is not associated with"
                " the instance %(instance_id)s")


class MigrationNotFound(NotFound):
    message = _("Migration %(migration_id)s could not be found.")


class MigrationNotFoundByStatus(MigrationNotFound):
    message = _("Migration not found for instance %(instance_id)s "
                "with status %(status)s.")


class ConsolePoolNotFound(NotFound):
    message = _("Console pool %(pool_id)s could not be found.")


class ConsolePoolNotFoundForHostType(NotFound):
    message = _("Console pool of type %(console_type)s "
                "for compute host %(compute_host)s "
                "on proxy host %(host)s not found.")


class ConsoleNotFound(NotFound):
    message = _("Console %(console_id)s could not be found.")


class ConsoleNotFoundForInstance(ConsoleNotFound):
    message = _("Console for instance %(instance_uuid)s could not be found.")


class ConsoleNotFoundInPoolForInstance(ConsoleNotFound):
    message = _("Console for instance %(instance_uuid)s "
                "in pool %(pool_id)s could not be found.")


class ConsoleTypeInvalid(Invalid):
    message = _("Invalid console type %(console_type)s ")


class InstanceTypeNotFound(NotFound):
    message = _("Instance type %(instance_type_id)s could not be found.")


class InstanceTypeNotFoundByName(InstanceTypeNotFound):
    message = _("Instance type with name %(instance_type_name)s "
                "could not be found.")


class FlavorNotFound(NotFound):
    message = _("Flavor %(flavor_id)s could not be found.")


class FlavorAccessNotFound(NotFound):
    message = _("Flavor access not found for %(flavor_id)s / "
                "%(project_id)s combination.")


class SchedulerHostFilterNotFound(NotFound):
    message = _("Scheduler Host Filter %(filter_name)s could not be found.")


class SchedulerCostFunctionNotFound(NotFound):
    message = _("Scheduler cost function %(cost_fn_str)s could"
                " not be found.")


class SchedulerWeightFlagNotFound(NotFound):
    message = _("Scheduler weight flag not found: %(flag_name)s")


class InstanceMetadataNotFound(NotFound):
    message = _("Instance %(instance_uuid)s has no metadata with "
                "key %(metadata_key)s.")


class InstanceSystemMetadataNotFound(NotFound):
    message = _("Instance %(instance_uuid)s has no system metadata with "
                "key %(metadata_key)s.")


class InstanceTypeExtraSpecsNotFound(NotFound):
    message = _("Instance Type %(instance_type_id)s has no extra specs with "
                "key %(extra_specs_key)s.")


class FileNotFound(NotFound):
    message = _("File %(file_path)s could not be found.")


class NoFilesFound(NotFound):
    message = _("Zero files could be found.")


class SwitchNotFoundForNetworkAdapter(NotFound):
    message = _("Virtual switch associated with the "
                "network adapter %(adapter)s not found.")


class NetworkAdapterNotFound(NotFound):
    message = _("Network adapter %(adapter)s could not be found.")


class ClassNotFound(NotFound):
    message = _("Class %(class_name)s could not be found: %(exception)s")


class NotAllowed(NovaException):
    message = _("Action not allowed.")


class ImageRotationNotAllowed(NovaException):
    message = _("Rotation is not allowed for snapshots")


class RotationRequiredForBackup(NovaException):
    message = _("Rotation param is required for backup image_type")


class KeyPairExists(Duplicate):
    message = _("Key pair %(key_name)s already exists.")


class InstanceExists(Duplicate):
    message = _("Instance %(name)s already exists.")


class InstanceTypeExists(Duplicate):
    message = _("Instance Type %(name)s already exists.")


class FlavorAccessExists(Duplicate):
    message = _("Flavor access already exists for flavor %(flavor_id)s "
                "and project %(project_id)s combination.")


class VolumeTypeExists(Duplicate):
    message = _("Volume Type %(name)s already exists.")


class InvalidSharedStorage(NovaException):
    message = _("%(path)s is not on 
shared storage: %(reason)s") class InvalidLocalStorage(NovaException): message = _("%(path)s is not on local storage: %(reason)s") class MigrationError(NovaException): message = _("Migration error") + ": %(reason)s" class MalformedRequestBody(NovaException): message = _("Malformed message body: %(reason)s") # NOTE(johannes): NotFound should only be used when a 404 error is # appropriate to be returned class ConfigNotFound(NovaException): message = _("Could not find config at %(path)s") class PasteAppNotFound(NovaException): message = _("Could not load paste app '%(name)s' from %(path)s") class CannotResizeToSameFlavor(NovaException): message = _("When resizing, instances must change flavor!") class ImageTooLarge(NovaException): message = _("Image is larger than instance type allows") class InstanceTypeMemoryTooSmall(NovaException): message = _("Instance type's memory is too small for requested image.") class InstanceTypeDiskTooSmall(NovaException): message = _("Instance type's disk is too small for requested image.") class InsufficientFreeMemory(NovaException): message = _("Insufficient free memory on compute node to start %(uuid)s.") class CouldNotFetchMetrics(NovaException): message = _("Could not fetch bandwidth/cpu/disk metrics for this host.") class NoValidHost(NovaException): message = _("No valid host was found. %(reason)s") class WillNotSchedule(NovaException): message = _("Host %(host)s is not up or doesn't exist.") class QuotaError(NovaException): message = _("Quota exceeded") + ": code=%(code)s" code = 413 headers = {'Retry-After': 0} safe = True class TooManyInstances(QuotaError): message = _("Quota exceeded for %(overs)s: Requested %(req)s," " but already used %(used)d of %(allowed)d %(resource)s") class VolumeSizeTooLarge(QuotaError): message = _("Maximum volume size exceeded") class VolumeLimitExceeded(QuotaError): message = _("Maximum number of volumes allowed (%(allowed)d) exceeded") class FloatingIpLimitExceeded(QuotaError): message = _("Maximum number of floating ips exceeded") class MetadataLimitExceeded(QuotaError): message = _("Maximum number of metadata items exceeds %(allowed)d") class OnsetFileLimitExceeded(QuotaError): message = _("Personality file limit exceeded") class OnsetFilePathLimitExceeded(QuotaError): message = _("Personality file path too long") class OnsetFileContentLimitExceeded(QuotaError): message = _("Personality file content too long") class KeypairLimitExceeded(QuotaError): message = _("Maximum number of key pairs exceeded") class SecurityGroupLimitExceeded(QuotaError): message = _("Maximum number of security groups or rules exceeded") class AggregateError(NovaException): message = _("Aggregate %(aggregate_id)s: action '%(action)s' " "caused an error: %(reason)s.") class AggregateNotFound(NotFound): message = _("Aggregate %(aggregate_id)s could not be found.") class AggregateNameExists(Duplicate): message = _("Aggregate %(aggregate_name)s already exists.") class AggregateHostNotFound(NotFound): message = _("Aggregate %(aggregate_id)s has no host %(host)s.") class AggregateMetadataNotFound(NotFound): message = _("Aggregate %(aggregate_id)s has no metadata with " "key %(metadata_key)s.") class AggregateHostExists(Duplicate): message = _("Aggregate %(aggregate_id)s already has host %(host)s.") class DuplicateSfVolumeNames(Duplicate): message = _("Detected more than one volume with name %(vol_name)s") class VolumeTypeCreateFailed(NovaException): message = _("Cannot create volume_type with " "name %(name)s and specs %(extra_specs)s") class 
VolumeBackendAPIException(NovaException): message = _("Bad or unexpected response from the storage volume " "backend API: %(data)s") class NfsException(NovaException): message = _("Unknown NFS exception") class NfsNoSharesMounted(NotFound): message = _("No mounted NFS shares found") class NfsNoSuitableShareFound(NotFound): message = _("There is no share which can host %(volume_size)sG") class InstanceTypeCreateFailed(NovaException): message = _("Unable to create instance type") class InstancePasswordSetFailed(NovaException): message = _("Failed to set admin password on %(instance)s " "because %(reason)s") safe = True class SolidFireAPIException(NovaException): message = _("Bad response from SolidFire API") class SolidFireAPIDataException(SolidFireAPIException): message = _("Error in SolidFire API response: data=%(data)s") class DuplicateVlan(Duplicate): message = _("Detected existing vlan with id %(vlan)d") class InstanceNotFound(NotFound): message = _("Instance %(instance_id)s could not be found.") class MarkerNotFound(NotFound): message = _("Marker %(marker)s could not be found.") class InvalidInstanceIDMalformed(Invalid): message = _("Invalid id: %(val)s (expecting \"i-...\").") class CouldNotFetchImage(NovaException): message = _("Could not fetch image %(image_id)s") class TaskAlreadyRunning(NovaException): message = _("Task %(task_name)s is already running on host %(host)s") class TaskNotRunning(NovaException): message = _("Task %(task_name)s is not running on host %(host)s") class InstanceIsLocked(InstanceInvalidState): message = _("Instance %(instance_uuid)s is locked") class ConfigDriveMountFailed(NovaException): message = _("Could not mount vfat config drive. %(operation)s failed. " "Error: %(error)s") class ConfigDriveUnknownFormat(NovaException): message = _("Unknown config drive format %(format)s. Select one of " "iso9660 or vfat.") class InstanceUserDataTooLarge(NovaException): message = _("User data too large. User data must be no larger than " "%(maxsize)s bytes once base64 encoded. Your data is " "%(length)d bytes") class InstanceUserDataMalformed(NovaException): message = _("User data needs to be valid base 64.") class UnexpectedTaskStateError(NovaException): message = _("unexpected task state: expecting %(expected)s but " "the actual state is %(actual)s") class CryptoCAFileNotFound(FileNotFound): message = _("The CA file for %(project)s could not be found") class CryptoCRLFileNotFound(FileNotFound): message = _("The CRL file for %(project)s could not be found") def get_context_from_function_and_args(function, args, kwargs): """Find an arg of type RequestContext and return it. This is useful in a couple of decorators where we don't know much about the function we're wrapping. """ # import here to avoid circularity: from nova import context for arg in itertools.chain(kwargs.values(), args): if isinstance(arg, context.RequestContext): return arg return None
./CrossVul/dataset_final_sorted/CWE-264/py/good_5539_1
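The file above leans on one convention worth calling out: subclasses define a printf-style `message` template and NovaException.__init__ interpolates whatever kwargs the caller passes, recording them (plus the class-level code) on the instance. A small sketch, assuming `_` has been installed as a gettext alias at startup (as nova does) and reusing VolumeNotFound from the file above:

try:
    raise VolumeNotFound(volume_id='vol-42')
except NovaException as e:
    # __init__ rendered the template with the kwargs and stashed them,
    # including the class-level HTTP-ish code (404 for NotFound).
    assert str(e) == 'Volume vol-42 could not be found.'
    assert e.kwargs['code'] == 404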
crossvul-python_data_bad_3632_1
# Copyright 2011 OpenStack LLC. # Copyright 2012 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The security groups extension.""" import urllib from xml.dom import minidom from webob import exc import webob from nova.api.openstack import common from nova.api.openstack import extensions from nova.api.openstack import wsgi from nova.api.openstack import xmlutil from nova import compute from nova import db from nova import exception from nova import flags from nova import log as logging from nova import utils LOG = logging.getLogger(__name__) FLAGS = flags.FLAGS authorize = extensions.extension_authorizer('compute', 'security_groups') def make_rule(elem): elem.set('id') elem.set('parent_group_id') proto = xmlutil.SubTemplateElement(elem, 'ip_protocol') proto.text = 'ip_protocol' from_port = xmlutil.SubTemplateElement(elem, 'from_port') from_port.text = 'from_port' to_port = xmlutil.SubTemplateElement(elem, 'to_port') to_port.text = 'to_port' group = xmlutil.SubTemplateElement(elem, 'group', selector='group') name = xmlutil.SubTemplateElement(group, 'name') name.text = 'name' tenant_id = xmlutil.SubTemplateElement(group, 'tenant_id') tenant_id.text = 'tenant_id' ip_range = xmlutil.SubTemplateElement(elem, 'ip_range', selector='ip_range') cidr = xmlutil.SubTemplateElement(ip_range, 'cidr') cidr.text = 'cidr' def make_sg(elem): elem.set('id') elem.set('tenant_id') elem.set('name') desc = xmlutil.SubTemplateElement(elem, 'description') desc.text = 'description' rules = xmlutil.SubTemplateElement(elem, 'rules') rule = xmlutil.SubTemplateElement(rules, 'rule', selector='rules') make_rule(rule) sg_nsmap = {None: wsgi.XMLNS_V11} class SecurityGroupRuleTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('security_group_rule', selector='security_group_rule') make_rule(root) return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap) class SecurityGroupTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('security_group', selector='security_group') make_sg(root) return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap) class SecurityGroupsTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('security_groups') elem = xmlutil.SubTemplateElement(root, 'security_group', selector='security_groups') make_sg(elem) return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap) class SecurityGroupXMLDeserializer(wsgi.MetadataXMLDeserializer): """ Deserializer to handle xml-formatted security group requests. 
""" def default(self, string): """Deserialize an xml-formatted security group create request""" dom = minidom.parseString(string) security_group = {} sg_node = self.find_first_child_named(dom, 'security_group') if sg_node is not None: if sg_node.hasAttribute('name'): security_group['name'] = sg_node.getAttribute('name') desc_node = self.find_first_child_named(sg_node, "description") if desc_node: security_group['description'] = self.extract_text(desc_node) return {'body': {'security_group': security_group}} class SecurityGroupRulesXMLDeserializer(wsgi.MetadataXMLDeserializer): """ Deserializer to handle xml-formatted security group requests. """ def default(self, string): """Deserialize an xml-formatted security group create request""" dom = minidom.parseString(string) security_group_rule = self._extract_security_group_rule(dom) return {'body': {'security_group_rule': security_group_rule}} def _extract_security_group_rule(self, node): """Marshal the security group rule attribute of a parsed request""" sg_rule = {} sg_rule_node = self.find_first_child_named(node, 'security_group_rule') if sg_rule_node is not None: ip_protocol_node = self.find_first_child_named(sg_rule_node, "ip_protocol") if ip_protocol_node is not None: sg_rule['ip_protocol'] = self.extract_text(ip_protocol_node) from_port_node = self.find_first_child_named(sg_rule_node, "from_port") if from_port_node is not None: sg_rule['from_port'] = self.extract_text(from_port_node) to_port_node = self.find_first_child_named(sg_rule_node, "to_port") if to_port_node is not None: sg_rule['to_port'] = self.extract_text(to_port_node) parent_group_id_node = self.find_first_child_named(sg_rule_node, "parent_group_id") if parent_group_id_node is not None: sg_rule['parent_group_id'] = self.extract_text( parent_group_id_node) group_id_node = self.find_first_child_named(sg_rule_node, "group_id") if group_id_node is not None: sg_rule['group_id'] = self.extract_text(group_id_node) cidr_node = self.find_first_child_named(sg_rule_node, "cidr") if cidr_node is not None: sg_rule['cidr'] = self.extract_text(cidr_node) return sg_rule class SecurityGroupControllerBase(object): """Base class for Security Group controllers.""" def __init__(self): self.compute_api = compute.API() self.sgh = utils.import_object(FLAGS.security_group_handler) def _format_security_group_rule(self, context, rule): sg_rule = {} sg_rule['id'] = rule.id sg_rule['parent_group_id'] = rule.parent_group_id sg_rule['ip_protocol'] = rule.protocol sg_rule['from_port'] = rule.from_port sg_rule['to_port'] = rule.to_port sg_rule['group'] = {} sg_rule['ip_range'] = {} if rule.group_id: source_group = db.security_group_get(context, rule.group_id) sg_rule['group'] = {'name': source_group.name, 'tenant_id': source_group.project_id} else: sg_rule['ip_range'] = {'cidr': rule.cidr} return sg_rule def _format_security_group(self, context, group): security_group = {} security_group['id'] = group.id security_group['description'] = group.description security_group['name'] = group.name security_group['tenant_id'] = group.project_id security_group['rules'] = [] for rule in group.rules: security_group['rules'] += [self._format_security_group_rule( context, rule)] return security_group class SecurityGroupController(SecurityGroupControllerBase): """The Security group API controller for the OpenStack API.""" def _get_security_group(self, context, id): try: id = int(id) security_group = db.security_group_get(context, id) except ValueError: msg = _("Security group id should be integer") raise 
exc.HTTPBadRequest(explanation=msg) except exception.NotFound as exp: raise exc.HTTPNotFound(explanation=unicode(exp)) return security_group @wsgi.serializers(xml=SecurityGroupTemplate) def show(self, req, id): """Return data about the given security group.""" context = req.environ['nova.context'] authorize(context) security_group = self._get_security_group(context, id) return {'security_group': self._format_security_group(context, security_group)} def delete(self, req, id): """Delete a security group.""" context = req.environ['nova.context'] authorize(context) security_group = self._get_security_group(context, id) if db.security_group_in_use(context, security_group.id): msg = _("Security group is still in use") raise exc.HTTPBadRequest(explanation=msg) LOG.audit(_("Delete security group %s"), id, context=context) db.security_group_destroy(context, security_group.id) self.sgh.trigger_security_group_destroy_refresh( context, security_group.id) return webob.Response(status_int=202) @wsgi.serializers(xml=SecurityGroupsTemplate) def index(self, req): """Returns a list of security groups""" context = req.environ['nova.context'] authorize(context) self.compute_api.ensure_default_security_group(context) groups = db.security_group_get_by_project(context, context.project_id) limited_list = common.limited(groups, req) result = [self._format_security_group(context, group) for group in limited_list] return {'security_groups': list(sorted(result, key=lambda k: (k['tenant_id'], k['name'])))} @wsgi.serializers(xml=SecurityGroupTemplate) @wsgi.deserializers(xml=SecurityGroupXMLDeserializer) def create(self, req, body): """Creates a new security group.""" context = req.environ['nova.context'] authorize(context) if not body: raise exc.HTTPUnprocessableEntity() security_group = body.get('security_group', None) if security_group is None: raise exc.HTTPUnprocessableEntity() group_name = security_group.get('name', None) group_description = security_group.get('description', None) self._validate_security_group_property(group_name, "name") self._validate_security_group_property(group_description, "description") group_name = group_name.strip() group_description = group_description.strip() LOG.audit(_("Create Security Group %s"), group_name, context=context) self.compute_api.ensure_default_security_group(context) if db.security_group_exists(context, context.project_id, group_name): msg = _('Security group %s already exists') % group_name raise exc.HTTPBadRequest(explanation=msg) group = {'user_id': context.user_id, 'project_id': context.project_id, 'name': group_name, 'description': group_description} group_ref = db.security_group_create(context, group) self.sgh.trigger_security_group_create_refresh(context, group) return {'security_group': self._format_security_group(context, group_ref)} def _validate_security_group_property(self, value, typ): """ typ will be either 'name' or 'description', depending on the caller """ try: val = value.strip() except AttributeError: msg = _("Security group %s is not a string or unicode") % typ raise exc.HTTPBadRequest(explanation=msg) if not val: msg = _("Security group %s cannot be empty.") % typ raise exc.HTTPBadRequest(explanation=msg) if len(val) > 255: msg = _("Security group %s should not be greater " "than 255 characters.") % typ raise exc.HTTPBadRequest(explanation=msg) class SecurityGroupRulesController(SecurityGroupControllerBase): @wsgi.serializers(xml=SecurityGroupRuleTemplate) @wsgi.deserializers(xml=SecurityGroupRulesXMLDeserializer) def create(self, req, body): 
context = req.environ['nova.context'] authorize(context) if not body: raise exc.HTTPUnprocessableEntity() if not 'security_group_rule' in body: raise exc.HTTPUnprocessableEntity() self.compute_api.ensure_default_security_group(context) sg_rule = body['security_group_rule'] parent_group_id = sg_rule.get('parent_group_id', None) try: parent_group_id = int(parent_group_id) security_group = db.security_group_get(context, parent_group_id) except ValueError: msg = _("Parent group id is not integer") raise exc.HTTPBadRequest(explanation=msg) except exception.NotFound as exp: msg = _("Security group (%s) not found") % parent_group_id raise exc.HTTPNotFound(explanation=msg) msg = _("Authorize security group ingress %s") LOG.audit(msg, security_group['name'], context=context) try: values = self._rule_args_to_dict(context, to_port=sg_rule.get('to_port'), from_port=sg_rule.get('from_port'), parent_group_id=sg_rule.get('parent_group_id'), ip_protocol=sg_rule.get('ip_protocol'), cidr=sg_rule.get('cidr'), group_id=sg_rule.get('group_id')) except Exception as exp: raise exc.HTTPBadRequest(explanation=unicode(exp)) if values is None: msg = _("Not enough parameters to build a " "valid rule.") raise exc.HTTPBadRequest(explanation=msg) values['parent_group_id'] = security_group.id if self._security_group_rule_exists(security_group, values): msg = _('This rule already exists in group %s') % parent_group_id raise exc.HTTPBadRequest(explanation=msg) security_group_rule = db.security_group_rule_create(context, values) self.sgh.trigger_security_group_rule_create_refresh( context, [security_group_rule['id']]) self.compute_api.trigger_security_group_rules_refresh(context, security_group_id=security_group['id']) return {"security_group_rule": self._format_security_group_rule( context, security_group_rule)} def _security_group_rule_exists(self, security_group, values): """Indicates whether the specified rule values are already defined in the given security group. """ for rule in security_group.rules: is_duplicate = True keys = ('group_id', 'cidr', 'from_port', 'to_port', 'protocol') for key in keys: if rule.get(key) != values.get(key): is_duplicate = False break if is_duplicate: return True return False def _rule_args_to_dict(self, context, to_port=None, from_port=None, parent_group_id=None, ip_protocol=None, cidr=None, group_id=None): values = {} if group_id is not None: try: parent_group_id = int(parent_group_id) group_id = int(group_id) except ValueError: msg = _("Parent or group id is not integer") raise exception.InvalidInput(reason=msg) values['group_id'] = group_id #check if groupId exists db.security_group_get(context, group_id) elif cidr: # If this fails, it throws an exception. This is what we want. try: cidr = urllib.unquote(cidr).decode() except Exception: raise exception.InvalidCidr(cidr=cidr) if not utils.is_valid_cidr(cidr): # Raise exception for non-valid address raise exception.InvalidCidr(cidr=cidr) values['cidr'] = cidr else: values['cidr'] = '0.0.0.0/0' if group_id: # Open everything if an explicit port range or type/code are not # specified, but only if a source group was specified. 
ip_proto_upper = ip_protocol.upper() if ip_protocol else ''
        if (ip_proto_upper == 'ICMP' and
                from_port is None and to_port is None):
            from_port = -1
            to_port = -1
        elif (ip_proto_upper in ['TCP', 'UDP'] and
                from_port is None and to_port is None):
            from_port = 1
            to_port = 65535

        if ip_protocol and from_port is not None and to_port is not None:
            ip_protocol = str(ip_protocol)
            try:
                from_port = int(from_port)
                to_port = int(to_port)
            except ValueError:
                if ip_protocol.upper() == 'ICMP':
                    raise exception.InvalidInput(reason="Type and"
                          " Code must be integers for ICMP protocol type")
                else:
                    raise exception.InvalidInput(reason="To and From ports "
                          "must be integers")

            if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']:
                raise exception.InvalidIpProtocol(protocol=ip_protocol)

            # Verify that from_port must always be less than
            # or equal to to_port
            if (ip_protocol.upper() in ['TCP', 'UDP'] and
                    from_port > to_port):
                raise exception.InvalidPortRange(from_port=from_port,
                      to_port=to_port, msg="Former value cannot"
                                           " be greater than the latter")

            # Verify valid TCP, UDP port ranges
            if (ip_protocol.upper() in ['TCP', 'UDP'] and
                    (from_port < 1 or to_port > 65535)):
                raise exception.InvalidPortRange(from_port=from_port,
                      to_port=to_port, msg="Valid TCP ports should"
                                           " be between 1-65535")

            # Verify ICMP type and code
            if (ip_protocol.upper() == "ICMP" and
                    (from_port < -1 or from_port > 255 or
                     to_port < -1 or to_port > 255)):
                raise exception.InvalidPortRange(from_port=from_port,
                      to_port=to_port, msg="For ICMP, the"
                                           " type:code must be valid")

            values['protocol'] = ip_protocol
            values['from_port'] = from_port
            values['to_port'] = to_port
        else:
            # If cidr based filtering, protocol and ports are mandatory
            if 'cidr' in values:
                return None

        return values

    def delete(self, req, id):
        context = req.environ['nova.context']
        authorize(context)

        self.compute_api.ensure_default_security_group(context)
        try:
            id = int(id)
            rule = db.security_group_rule_get(context, id)
        except ValueError:
            msg = _("Rule id is not integer")
            raise exc.HTTPBadRequest(explanation=msg)
        except exception.NotFound:
            msg = _("Rule (%s) not found") % id
            raise exc.HTTPNotFound(explanation=msg)

        group_id = rule.parent_group_id
        security_group = db.security_group_get(context, group_id)

        msg = _("Revoke security group ingress %s")
        LOG.audit(msg, security_group['name'], context=context)

        db.security_group_rule_destroy(context, rule['id'])
        self.sgh.trigger_security_group_rule_destroy_refresh(
            context, [rule['id']])
        self.compute_api.trigger_security_group_rules_refresh(context,
            security_group_id=security_group['id'])

        return webob.Response(status_int=202)


class ServerSecurityGroupController(SecurityGroupControllerBase):

    @wsgi.serializers(xml=SecurityGroupsTemplate)
    def index(self, req, server_id):
        """Returns a list of security groups for the given instance."""
        context = req.environ['nova.context']
        authorize(context)

        self.compute_api.ensure_default_security_group(context)

        try:
            instance = self.compute_api.get(context, server_id)
            groups = db.security_group_get_by_instance(context,
                                                       instance['id'])
        except exception.ApiError, e:
            raise webob.exc.HTTPBadRequest(explanation=e.message)
        except exception.NotAuthorized, e:
            raise webob.exc.HTTPUnauthorized()

        result = [self._format_security_group(context, group)
                  for group in groups]

        return {'security_groups':
                list(sorted(result,
                            key=lambda k: (k['tenant_id'], k['name'])))}


class SecurityGroupActionController(wsgi.Controller):
    def __init__(self, *args, **kwargs):
        super(SecurityGroupActionController, 
self).__init__(*args, **kwargs) self.compute_api = compute.API() self.sgh = utils.import_object(FLAGS.security_group_handler) @wsgi.action('addSecurityGroup') def _addSecurityGroup(self, req, id, body): context = req.environ['nova.context'] authorize(context) try: body = body['addSecurityGroup'] group_name = body['name'] except TypeError: msg = _("Missing parameter dict") raise webob.exc.HTTPBadRequest(explanation=msg) except KeyError: msg = _("Security group not specified") raise webob.exc.HTTPBadRequest(explanation=msg) if not group_name or group_name.strip() == '': msg = _("Security group name cannot be empty") raise webob.exc.HTTPBadRequest(explanation=msg) try: instance = self.compute_api.get(context, id) self.compute_api.add_security_group(context, instance, group_name) self.sgh.trigger_instance_add_security_group_refresh( context, instance, group_name) except exception.SecurityGroupNotFound as exp: raise exc.HTTPNotFound(explanation=unicode(exp)) except exception.InstanceNotFound as exp: raise exc.HTTPNotFound(explanation=unicode(exp)) except exception.Invalid as exp: raise exc.HTTPBadRequest(explanation=unicode(exp)) return webob.Response(status_int=202) @wsgi.action('removeSecurityGroup') def _removeSecurityGroup(self, req, id, body): context = req.environ['nova.context'] authorize(context) try: body = body['removeSecurityGroup'] group_name = body['name'] except TypeError: msg = _("Missing parameter dict") raise webob.exc.HTTPBadRequest(explanation=msg) except KeyError: msg = _("Security group not specified") raise webob.exc.HTTPBadRequest(explanation=msg) if not group_name or group_name.strip() == '': msg = _("Security group name cannot be empty") raise webob.exc.HTTPBadRequest(explanation=msg) try: instance = self.compute_api.get(context, id) self.compute_api.remove_security_group(context, instance, group_name) self.sgh.trigger_instance_remove_security_group_refresh( context, instance, group_name) except exception.SecurityGroupNotFound as exp: raise exc.HTTPNotFound(explanation=unicode(exp)) except exception.InstanceNotFound as exp: raise exc.HTTPNotFound(explanation=unicode(exp)) except exception.Invalid as exp: raise exc.HTTPBadRequest(explanation=unicode(exp)) return webob.Response(status_int=202) class Security_groups(extensions.ExtensionDescriptor): """Security group support""" name = "SecurityGroups" alias = "security_groups" namespace = "http://docs.openstack.org/compute/ext/securitygroups/api/v1.1" updated = "2011-07-21T00:00:00+00:00" def get_controller_extensions(self): controller = SecurityGroupActionController() extension = extensions.ControllerExtension(self, 'servers', controller) return [extension] def get_resources(self): resources = [] res = extensions.ResourceExtension('os-security-groups', controller=SecurityGroupController()) resources.append(res) res = extensions.ResourceExtension('os-security-group-rules', controller=SecurityGroupRulesController()) resources.append(res) res = extensions.ResourceExtension( 'os-security-groups', controller=ServerSecurityGroupController(), parent=dict(member_name='server', collection_name='servers')) resources.append(res) return resources
./CrossVul/dataset_final_sorted/CWE-264/py/bad_3632_1
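_rule_args_to_dict in the extension above fills in protocol-specific port defaults before validating anything. Restated as a standalone helper for a quick sanity check (default_ports is a hypothetical name for this sketch, not part of the extension):

def default_ports(ip_protocol, from_port, to_port):
    # Same defaulting rules as _rule_args_to_dict: when a source group is
    # given without an explicit range, ICMP opens all types/codes and
    # TCP/UDP open the full port range.
    proto = ip_protocol.upper() if ip_protocol else ''
    if proto == 'ICMP' and from_port is None and to_port is None:
        return -1, -1          # -1 means "all types/codes" for ICMP
    if proto in ('TCP', 'UDP') and from_port is None and to_port is None:
        return 1, 65535        # full port range
    return from_port, to_port

assert default_ports('icmp', None, None) == (-1, -1)
assert default_ports('tcp', None, None) == (1, 65535)
assert default_ports('tcp', 22, 22) == (22, 22)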
crossvul-python_data_good_3634_5
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Quotas for instances, volumes, and floating ips.""" from nova import db from nova.openstack.common import cfg from nova import flags quota_opts = [ cfg.IntOpt('quota_instances', default=10, help='number of instances allowed per project'), cfg.IntOpt('quota_cores', default=20, help='number of instance cores allowed per project'), cfg.IntOpt('quota_ram', default=50 * 1024, help='megabytes of instance ram allowed per project'), cfg.IntOpt('quota_volumes', default=10, help='number of volumes allowed per project'), cfg.IntOpt('quota_gigabytes', default=1000, help='number of volume gigabytes allowed per project'), cfg.IntOpt('quota_floating_ips', default=10, help='number of floating ips allowed per project'), cfg.IntOpt('quota_metadata_items', default=128, help='number of metadata items allowed per instance'), cfg.IntOpt('quota_max_injected_files', default=5, help='number of injected files allowed'), cfg.IntOpt('quota_max_injected_file_content_bytes', default=10 * 1024, help='number of bytes allowed per injected file'), cfg.IntOpt('quota_max_injected_file_path_bytes', default=255, help='number of bytes allowed per injected file path'), cfg.IntOpt('quota_security_groups', default=10, help='number of security groups per project'), cfg.IntOpt('quota_security_group_rules', default=20, help='number of security rules per security group'), ] FLAGS = flags.FLAGS FLAGS.register_opts(quota_opts) def _get_default_quotas(): defaults = { 'instances': FLAGS.quota_instances, 'cores': FLAGS.quota_cores, 'ram': FLAGS.quota_ram, 'volumes': FLAGS.quota_volumes, 'gigabytes': FLAGS.quota_gigabytes, 'floating_ips': FLAGS.quota_floating_ips, 'metadata_items': FLAGS.quota_metadata_items, 'injected_files': FLAGS.quota_max_injected_files, 'injected_file_content_bytes': FLAGS.quota_max_injected_file_content_bytes, 'security_groups': FLAGS.quota_security_groups, 'security_group_rules': FLAGS.quota_security_group_rules, } # -1 in the quota flags means unlimited for key in defaults.keys(): if defaults[key] == -1: defaults[key] = None return defaults def get_project_quotas(context, project_id): rval = _get_default_quotas() quota = db.quota_get_all_by_project(context, project_id) for key in rval.keys(): if key in quota: rval[key] = quota[key] return rval def _get_request_allotment(requested, used, quota): if quota is None: return requested return quota - used def allowed_instances(context, requested_instances, instance_type): """Check quota and return min(requested_instances, allowed_instances).""" project_id = context.project_id context = context.elevated() requested_cores = requested_instances * instance_type['vcpus'] requested_ram = requested_instances * instance_type['memory_mb'] usage = db.instance_data_get_for_project(context, project_id) used_instances, used_cores, used_ram = usage quota 
= get_project_quotas(context, project_id) allowed_instances = _get_request_allotment(requested_instances, used_instances, quota['instances']) allowed_cores = _get_request_allotment(requested_cores, used_cores, quota['cores']) allowed_ram = _get_request_allotment(requested_ram, used_ram, quota['ram']) if instance_type['vcpus']: allowed_instances = min(allowed_instances, allowed_cores // instance_type['vcpus']) if instance_type['memory_mb']: allowed_instances = min(allowed_instances, allowed_ram // instance_type['memory_mb']) return min(requested_instances, allowed_instances) def allowed_volumes(context, requested_volumes, size): """Check quota and return min(requested_volumes, allowed_volumes).""" project_id = context.project_id context = context.elevated() size = int(size) requested_gigabytes = requested_volumes * size used_volumes, used_gigabytes = db.volume_data_get_for_project(context, project_id) quota = get_project_quotas(context, project_id) allowed_volumes = _get_request_allotment(requested_volumes, used_volumes, quota['volumes']) allowed_gigabytes = _get_request_allotment(requested_gigabytes, used_gigabytes, quota['gigabytes']) if size != 0: allowed_volumes = min(allowed_volumes, int(allowed_gigabytes // size)) return min(requested_volumes, allowed_volumes) def allowed_floating_ips(context, requested_floating_ips): """Check quota and return min(requested, allowed) floating ips.""" project_id = context.project_id context = context.elevated() used_floating_ips = db.floating_ip_count_by_project(context, project_id) quota = get_project_quotas(context, project_id) allowed_floating_ips = _get_request_allotment(requested_floating_ips, used_floating_ips, quota['floating_ips']) return min(requested_floating_ips, allowed_floating_ips) def allowed_security_groups(context, requested_security_groups): """Check quota and return min(requested, allowed) security groups.""" project_id = context.project_id context = context.elevated() used_sec_groups = db.security_group_count_by_project(context, project_id) quota = get_project_quotas(context, project_id) allowed_sec_groups = _get_request_allotment(requested_security_groups, used_sec_groups, quota['security_groups']) return min(requested_security_groups, allowed_sec_groups) def allowed_security_group_rules(context, security_group_id, requested_rules): """Check quota and return min(requested, allowed) sec group rules.""" project_id = context.project_id context = context.elevated() used_rules = db.security_group_rule_count_by_group(context, security_group_id) quota = get_project_quotas(context, project_id) allowed_rules = _get_request_allotment(requested_rules, used_rules, quota['security_group_rules']) return min(requested_rules, allowed_rules) def _calculate_simple_quota(context, resource, requested): """Check quota for resource; return min(requested, allowed).""" quota = get_project_quotas(context, context.project_id) allowed = _get_request_allotment(requested, 0, quota[resource]) return min(requested, allowed) def allowed_metadata_items(context, requested_metadata_items): """Return the number of metadata items allowed.""" return _calculate_simple_quota(context, 'metadata_items', requested_metadata_items) def allowed_injected_files(context, requested_injected_files): """Return the number of injected files allowed.""" return _calculate_simple_quota(context, 'injected_files', requested_injected_files) def allowed_injected_file_content_bytes(context, requested_bytes): """Return the number of bytes allowed per injected file content.""" resource = 
'injected_file_content_bytes' return _calculate_simple_quota(context, resource, requested_bytes) def allowed_injected_file_path_bytes(context): """Return the number of bytes allowed in an injected file path.""" return FLAGS.quota_max_injected_file_path_bytes
./CrossVul/dataset_final_sorted/CWE-264/py/good_3634_5
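The quota module above reduces every check to the same arithmetic: a quota of None (stored as -1 in the flags) means unlimited, otherwise the remaining allotment is the quota minus current usage, and the caller is granted min(requested, allowed). A minimal standalone sketch of that calculation, independent of nova's db layer (the example numbers are hypothetical):

# Standalone sketch of the quota allotment arithmetic used above;
# an illustration, not nova code itself.
def get_request_allotment(requested, used, quota):
    # A quota of None means unlimited, so grant the full request.
    if quota is None:
        return requested
    return quota - used

def allowed(requested, used, quota):
    # Grant at most what the remaining quota permits.
    return min(requested, get_request_allotment(requested, used, quota))

# Hypothetical project: a quota of 10 instances with 7 already in use
# leaves room for 3 more, while an unlimited quota grants everything.
assert allowed(5, 7, 10) == 3
assert allowed(5, 7, None) == 5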
crossvul-python_data_good_1622_1
import argparse from binascii import hexlify from datetime import datetime from operator import attrgetter import functools import io import os import stat import sys import textwrap from attic import __version__ from attic.archive import Archive, ArchiveChecker from attic.repository import Repository from attic.cache import Cache from attic.key import key_creator from attic.helpers import Error, location_validator, format_time, \ format_file_mode, ExcludePattern, exclude_path, adjust_patterns, to_localtime, \ get_cache_dir, get_keys_dir, format_timedelta, prune_within, prune_split, \ Manifest, remove_surrogates, update_excludes, format_archive, check_extension_modules, Statistics, \ is_cachedir, bigint_to_int from attic.remote import RepositoryServer, RemoteRepository class Archiver: def __init__(self): self.exit_code = 0 def open_repository(self, location, create=False, exclusive=False): if location.proto == 'ssh': repository = RemoteRepository(location, create=create) else: repository = Repository(location.path, create=create, exclusive=exclusive) repository._location = location return repository def print_error(self, msg, *args): msg = args and msg % args or msg self.exit_code = 1 print('attic: ' + msg, file=sys.stderr) def print_verbose(self, msg, *args, **kw): if self.verbose: msg = args and msg % args or msg if kw.get('newline', True): print(msg) else: print(msg, end=' ') def do_serve(self, args): """Start Attic in server mode. This command is usually not used manually. """ return RepositoryServer(restrict_to_paths=args.restrict_to_paths).serve() def do_init(self, args): """Initialize an empty repository""" print('Initializing repository at "%s"' % args.repository.orig) repository = self.open_repository(args.repository, create=True, exclusive=True) key = key_creator(repository, args) manifest = Manifest(key, repository) manifest.key = key manifest.write() repository.commit() Cache(repository, key, manifest, warn_if_unencrypted=False) return self.exit_code def do_check(self, args): """Check repository consistency""" repository = self.open_repository(args.repository, exclusive=args.repair) if args.repair: while not os.environ.get('ATTIC_CHECK_I_KNOW_WHAT_I_AM_DOING'): self.print_error("""Warning: 'check --repair' is an experimental feature that might result in data loss. Type "Yes I am sure" if you understand this and want to continue.\n""") if input('Do you want to continue? 
') == 'Yes I am sure': break if not args.archives_only: print('Starting repository check...') if repository.check(repair=args.repair): print('Repository check complete, no problems found.') else: return 1 if not args.repo_only and not ArchiveChecker().check(repository, repair=args.repair): return 1 return 0 def do_change_passphrase(self, args): """Change repository key file passphrase""" repository = self.open_repository(args.repository) manifest, key = Manifest.load(repository) key.change_passphrase() return 0 def do_create(self, args): """Create new archive""" t0 = datetime.now() repository = self.open_repository(args.archive, exclusive=True) manifest, key = Manifest.load(repository) cache = Cache(repository, key, manifest) archive = Archive(repository, key, manifest, args.archive.archive, cache=cache, create=True, checkpoint_interval=args.checkpoint_interval, numeric_owner=args.numeric_owner) # Add Attic cache dir to inode_skip list skip_inodes = set() try: st = os.stat(get_cache_dir()) skip_inodes.add((st.st_ino, st.st_dev)) except IOError: pass # Add local repository dir to inode_skip list if not args.archive.host: try: st = os.stat(args.archive.path) skip_inodes.add((st.st_ino, st.st_dev)) except IOError: pass for path in args.paths: path = os.path.normpath(path) if args.dontcross: try: restrict_dev = os.lstat(path).st_dev except OSError as e: self.print_error('%s: %s', path, e) continue else: restrict_dev = None self._process(archive, cache, args.excludes, args.exclude_caches, skip_inodes, path, restrict_dev) archive.save() if args.stats: t = datetime.now() diff = t - t0 print('-' * 78) print('Archive name: %s' % args.archive.archive) print('Archive fingerprint: %s' % hexlify(archive.id).decode('ascii')) print('Start time: %s' % t0.strftime('%c')) print('End time: %s' % t.strftime('%c')) print('Duration: %s' % format_timedelta(diff)) print('Number of files: %d' % archive.stats.nfiles) archive.stats.print_('This archive:', cache) print('-' * 78) return self.exit_code def _process(self, archive, cache, excludes, exclude_caches, skip_inodes, path, restrict_dev): if exclude_path(path, excludes): return try: st = os.lstat(path) except OSError as e: self.print_error('%s: %s', path, e) return if (st.st_ino, st.st_dev) in skip_inodes: return # Entering a new filesystem? 
if restrict_dev and st.st_dev != restrict_dev: return # Ignore unix sockets if stat.S_ISSOCK(st.st_mode): return self.print_verbose(remove_surrogates(path)) if stat.S_ISREG(st.st_mode): try: archive.process_file(path, st, cache) except IOError as e: self.print_error('%s: %s', path, e) elif stat.S_ISDIR(st.st_mode): if exclude_caches and is_cachedir(path): return archive.process_item(path, st) try: entries = os.listdir(path) except OSError as e: self.print_error('%s: %s', path, e) else: for filename in sorted(entries): self._process(archive, cache, excludes, exclude_caches, skip_inodes, os.path.join(path, filename), restrict_dev) elif stat.S_ISLNK(st.st_mode): archive.process_symlink(path, st) elif stat.S_ISFIFO(st.st_mode): archive.process_item(path, st) elif stat.S_ISCHR(st.st_mode) or stat.S_ISBLK(st.st_mode): archive.process_dev(path, st) else: self.print_error('Unknown file type: %s', path) def do_extract(self, args): """Extract archive contents""" # be restrictive when restoring files, restore permissions later os.umask(0o077) repository = self.open_repository(args.archive) manifest, key = Manifest.load(repository) archive = Archive(repository, key, manifest, args.archive.archive, numeric_owner=args.numeric_owner) patterns = adjust_patterns(args.paths, args.excludes) dry_run = args.dry_run strip_components = args.strip_components dirs = [] for item in archive.iter_items(lambda item: not exclude_path(item[b'path'], patterns), preload=True): orig_path = item[b'path'] if strip_components: item[b'path'] = os.sep.join(orig_path.split(os.sep)[strip_components:]) if not item[b'path']: continue if not args.dry_run: while dirs and not item[b'path'].startswith(dirs[-1][b'path']): archive.extract_item(dirs.pop(-1)) self.print_verbose(remove_surrogates(orig_path)) try: if dry_run: archive.extract_item(item, dry_run=True) else: if stat.S_ISDIR(item[b'mode']): dirs.append(item) archive.extract_item(item, restore_attrs=False) else: archive.extract_item(item) except IOError as e: self.print_error('%s: %s', remove_surrogates(orig_path), e) if not args.dry_run: while dirs: archive.extract_item(dirs.pop(-1)) return self.exit_code def do_delete(self, args): """Delete an existing archive""" repository = self.open_repository(args.archive, exclusive=True) manifest, key = Manifest.load(repository) cache = Cache(repository, key, manifest) archive = Archive(repository, key, manifest, args.archive.archive, cache=cache) stats = Statistics() archive.delete(stats) manifest.write() repository.commit() cache.commit() if args.stats: stats.print_('Deleted data:', cache) return self.exit_code def do_mount(self, args): """Mount archive or an entire repository as a FUSE filesystem""" try: from attic.fuse import AtticOperations except ImportError: self.print_error('the "llfuse" module is required to use this feature') return self.exit_code if not os.path.isdir(args.mountpoint) or not os.access(args.mountpoint, os.R_OK | os.W_OK | os.X_OK): self.print_error('%s: Mountpoint must be a writable directory' % args.mountpoint) return self.exit_code repository = self.open_repository(args.src) manifest, key = Manifest.load(repository) if args.src.archive: archive = Archive(repository, key, manifest, args.src.archive) else: archive = None operations = AtticOperations(key, repository, manifest, archive) self.print_verbose("Mounting filesystem") try: operations.mount(args.mountpoint, args.options, args.foreground) except RuntimeError: # Relevant error message already printed to stderr by fuse self.exit_code = 1 return self.exit_code
def do_list(self, args): """List archive or repository contents""" repository = self.open_repository(args.src) manifest, key = Manifest.load(repository) if args.src.archive: tmap = {1: 'p', 2: 'c', 4: 'd', 6: 'b', 0o10: '-', 0o12: 'l', 0o14: 's'} archive = Archive(repository, key, manifest, args.src.archive) for item in archive.iter_items(): type = tmap.get(item[b'mode'] // 4096, '?') mode = format_file_mode(item[b'mode']) size = 0 if type == '-': try: size = sum(size for _, size, _ in item[b'chunks']) except KeyError: pass mtime = format_time(datetime.fromtimestamp(bigint_to_int(item[b'mtime']) / 1e9)) if b'source' in item: if type == 'l': extra = ' -> %s' % item[b'source'] else: type = 'h' extra = ' link to %s' % item[b'source'] else: extra = '' print('%s%s %-6s %-6s %8d %s %s%s' % (type, mode, item[b'user'] or item[b'uid'], item[b'group'] or item[b'gid'], size, mtime, remove_surrogates(item[b'path']), extra)) else: for archive in sorted(Archive.list_archives(repository, key, manifest), key=attrgetter('ts')): print(format_archive(archive)) return self.exit_code def do_info(self, args): """Show archive details such as disk space used""" repository = self.open_repository(args.archive) manifest, key = Manifest.load(repository) cache = Cache(repository, key, manifest) archive = Archive(repository, key, manifest, args.archive.archive, cache=cache) stats = archive.calc_stats(cache) print('Name:', archive.name) print('Fingerprint: %s' % hexlify(archive.id).decode('ascii')) print('Hostname:', archive.metadata[b'hostname']) print('Username:', archive.metadata[b'username']) print('Time: %s' % to_localtime(archive.ts).strftime('%c')) print('Command line:', remove_surrogates(' '.join(archive.metadata[b'cmdline']))) print('Number of files: %d' % stats.nfiles) stats.print_('This archive:', cache) return self.exit_code def do_prune(self, args): """Prune repository archives according to specified rules""" repository = self.open_repository(args.repository, exclusive=True) manifest, key = Manifest.load(repository) cache = Cache(repository, key, manifest) archives = list(sorted(Archive.list_archives(repository, key, manifest, cache), key=attrgetter('ts'), reverse=True)) if args.hourly + args.daily + args.weekly + args.monthly + args.yearly == 0 and args.within is None: self.print_error('At least one of the "within", "hourly", "daily", "weekly", "monthly" or "yearly" ' 'settings must be specified') return 1 if args.prefix: archives = [archive for archive in archives if archive.name.startswith(args.prefix)] keep = [] if args.within: keep += prune_within(archives, args.within) if args.hourly: keep += prune_split(archives, '%Y-%m-%d %H', args.hourly, keep) if args.daily: keep += prune_split(archives, '%Y-%m-%d', args.daily, keep) if args.weekly: keep += prune_split(archives, '%G-%V', args.weekly, keep) if args.monthly: keep += prune_split(archives, '%Y-%m', args.monthly, keep) if args.yearly: keep += prune_split(archives, '%Y', args.yearly, keep) keep.sort(key=attrgetter('ts'), reverse=True) to_delete = [a for a in archives if a not in keep] stats = Statistics() for archive in keep: self.print_verbose('Keeping archive: %s' % format_archive(archive)) for archive in to_delete: if args.dry_run: self.print_verbose('Would prune: %s' % format_archive(archive)) else: self.print_verbose('Pruning archive: %s' % format_archive(archive)) archive.delete(stats) if to_delete and not args.dry_run: manifest.write() repository.commit() cache.commit() if args.stats: stats.print_('Deleted data:', cache) return self.exit_code 
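# A note on the retention logic above: prune_split() (from attic.helpers)
# buckets archives by a strftime() period string ('%Y-%m-%d %H' for hourly,
# '%Y-%m-%d' for daily, '%G-%V' for weekly, and so on) and keeps the newest
# archive from each of the most recent n periods, skipping archives already
# kept by an earlier rule. A rough sketch of that idea (an illustration of
# the algorithm, not the actual attic.helpers implementation):
#
#     def prune_split_sketch(archives, format, n, skip=()):
#         keep, last_period = [], None
#         for a in sorted(archives, key=attrgetter('ts'), reverse=True):
#             period = a.ts.strftime(format)
#             if period != last_period:
#                 last_period = period
#                 if a not in skip:
#                     keep.append(a)
#                     if len(keep) == n:
#                         break
#         return keep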
helptext = {} helptext['patterns'] = ''' Exclude patterns use a variant of shell pattern syntax, with '*' matching any number of characters, '?' matching any single character, '[...]' matching any single character specified, including ranges, and '[!...]' matching any character not specified. For the purpose of these patterns, the path separator ('\\' for Windows and '/' on other systems) is not treated specially. For a path to match a pattern, it must completely match from start to end, or must match from the start to just before a path separator. Except for the root path, paths will never end in the path separator when matching is attempted. Thus, if a given pattern ends in a path separator, a '*' is appended before matching is attempted. Patterns with wildcards should be quoted to protect them from shell expansion. Examples: # Exclude '/home/user/file.o' but not '/home/user/file.odt': $ attic create -e '*.o' repo.attic / # Exclude '/home/user/junk' and '/home/user/subdir/junk' but # not '/home/user/importantjunk' or '/etc/junk': $ attic create -e '/home/*/junk' repo.attic / # Exclude the contents of '/home/user/cache' but not the directory itself: $ attic create -e /home/user/cache/ repo.attic / # The file '/home/user/cache/important' is *not* backed up: $ attic create -e /home/user/cache/ repo.attic / /home/user/cache/important ''' def do_help(self, parser, commands, args): if not args.topic: parser.print_help() elif args.topic in self.helptext: print(self.helptext[args.topic]) elif args.topic in commands: if args.epilog_only: print(commands[args.topic].epilog) elif args.usage_only: commands[args.topic].epilog = None commands[args.topic].print_help() else: commands[args.topic].print_help() else: parser.error('No help available on %s' % (args.topic,)) return self.exit_code def preprocess_args(self, args): deprecations = [ ('--hourly', '--keep-hourly', 'Warning: "--hourly" has been deprecated. Use "--keep-hourly" instead.'), ('--daily', '--keep-daily', 'Warning: "--daily" has been deprecated. Use "--keep-daily" instead.'), ('--weekly', '--keep-weekly', 'Warning: "--weekly" has been deprecated. Use "--keep-weekly" instead.'), ('--monthly', '--keep-monthly', 'Warning: "--monthly" has been deprecated. Use "--keep-monthly" instead.'), ('--yearly', '--keep-yearly', 'Warning: "--yearly" has been deprecated. Use "--keep-yearly" instead.') ] if args and args[0] == 'verify': print('Warning: "attic verify" has been deprecated. Use "attic extract --dry-run" instead.') args = ['extract', '--dry-run'] + args[1:] for i, arg in enumerate(args[:]): for old_name, new_name, warning in deprecations: if arg.startswith(old_name): args[i] = arg.replace(old_name, new_name) print(warning) return args def run(self, args=None): check_extension_modules() keys_dir = get_keys_dir() if not os.path.exists(keys_dir): os.makedirs(keys_dir) os.chmod(keys_dir, stat.S_IRWXU) cache_dir = get_cache_dir() if not os.path.exists(cache_dir): os.makedirs(cache_dir) os.chmod(cache_dir, stat.S_IRWXU) with open(os.path.join(cache_dir, 'CACHEDIR.TAG'), 'w') as fd: fd.write(textwrap.dedent(""" Signature: 8a477f597d28d172789f06886806bc55 # This file is a cache directory tag created by Attic. 
# For information about cache directory tags, see: # http://www.brynosaurus.com/cachedir/ """).lstrip()) common_parser = argparse.ArgumentParser(add_help=False) common_parser.add_argument('-v', '--verbose', dest='verbose', action='store_true', default=False, help='verbose output') # We can't use argparse for "serve" since we don't want it to show up in "Available commands" if args: args = self.preprocess_args(args) parser = argparse.ArgumentParser(description='Attic %s - Deduplicated Backups' % __version__) subparsers = parser.add_subparsers(title='Available commands') subparser = subparsers.add_parser('serve', parents=[common_parser], description=self.do_serve.__doc__) subparser.set_defaults(func=self.do_serve) subparser.add_argument('--restrict-to-path', dest='restrict_to_paths', action='append', metavar='PATH', help='restrict repository access to PATH') init_epilog = textwrap.dedent(""" This command initializes an empty repository. A repository is a filesystem directory containing the deduplicated data from zero or more archives. Encryption can be enabled at repository init time. """) subparser = subparsers.add_parser('init', parents=[common_parser], description=self.do_init.__doc__, epilog=init_epilog, formatter_class=argparse.RawDescriptionHelpFormatter) subparser.set_defaults(func=self.do_init) subparser.add_argument('repository', metavar='REPOSITORY', type=location_validator(archive=False), help='repository to create') subparser.add_argument('-e', '--encryption', dest='encryption', choices=('none', 'passphrase', 'keyfile'), default='none', help='select encryption method') check_epilog = textwrap.dedent(""" The check command verifies the consistency of a repository and the corresponding archives. The underlying repository data files are first checked to detect bit rot and other types of damage. After that the consistency and correctness of the archive metadata is verified. The archive metadata checks can be time consuming and require access to the key file and/or passphrase if encryption is enabled. These checks can be skipped using the --repository-only option. """) subparser = subparsers.add_parser('check', parents=[common_parser], description=self.do_check.__doc__, epilog=check_epilog, formatter_class=argparse.RawDescriptionHelpFormatter) subparser.set_defaults(func=self.do_check) subparser.add_argument('repository', metavar='REPOSITORY', type=location_validator(archive=False), help='repository to check consistency of') subparser.add_argument('--repository-only', dest='repo_only', action='store_true', default=False, help='only perform repository checks') subparser.add_argument('--archives-only', dest='archives_only', action='store_true', default=False, help='only perform archives checks') subparser.add_argument('--repair', dest='repair', action='store_true', default=False, help='attempt to repair any inconsistencies found') change_passphrase_epilog = textwrap.dedent(""" The key files used for repository encryption are optionally passphrase protected. This command can be used to change this passphrase.
""") subparser = subparsers.add_parser('change-passphrase', parents=[common_parser], description=self.do_change_passphrase.__doc__, epilog=change_passphrase_epilog, formatter_class=argparse.RawDescriptionHelpFormatter) subparser.set_defaults(func=self.do_change_passphrase) subparser.add_argument('repository', metavar='REPOSITORY', type=location_validator(archive=False)) create_epilog = textwrap.dedent(""" This command creates a backup archive containing all files found while recursively traversing all paths specified. The archive will consume almost no disk space for files or parts of files that have already been stored in other archives. See "attic help patterns" for more help on exclude patterns. """) subparser = subparsers.add_parser('create', parents=[common_parser], description=self.do_create.__doc__, epilog=create_epilog, formatter_class=argparse.RawDescriptionHelpFormatter) subparser.set_defaults(func=self.do_create) subparser.add_argument('-s', '--stats', dest='stats', action='store_true', default=False, help='print statistics for the created archive') subparser.add_argument('-e', '--exclude', dest='excludes', type=ExcludePattern, action='append', metavar="PATTERN", help='exclude paths matching PATTERN') subparser.add_argument('--exclude-from', dest='exclude_files', type=argparse.FileType('r'), action='append', metavar='EXCLUDEFILE', help='read exclude patterns from EXCLUDEFILE, one per line') subparser.add_argument('--exclude-caches', dest='exclude_caches', action='store_true', default=False, help='exclude directories that contain a CACHEDIR.TAG file (http://www.brynosaurus.com/cachedir/spec.html)') subparser.add_argument('-c', '--checkpoint-interval', dest='checkpoint_interval', type=int, default=300, metavar='SECONDS', help='write checkpoint every SECONDS seconds (Default: 300)') subparser.add_argument('--do-not-cross-mountpoints', dest='dontcross', action='store_true', default=False, help='do not cross mount points') subparser.add_argument('--numeric-owner', dest='numeric_owner', action='store_true', default=False, help='only store numeric user and group identifiers') subparser.add_argument('archive', metavar='ARCHIVE', type=location_validator(archive=True), help='archive to create') subparser.add_argument('paths', metavar='PATH', nargs='+', type=str, help='paths to archive') extract_epilog = textwrap.dedent(""" This command extracts the contents of an archive. By default the entire archive is extracted but a subset of files and directories can be selected by passing a list of ``PATHs`` as arguments. The file selection can further be restricted by using the ``--exclude`` option. See "attic help patterns" for more help on exclude patterns. 
""") subparser = subparsers.add_parser('extract', parents=[common_parser], description=self.do_extract.__doc__, epilog=extract_epilog, formatter_class=argparse.RawDescriptionHelpFormatter) subparser.set_defaults(func=self.do_extract) subparser.add_argument('-n', '--dry-run', dest='dry_run', default=False, action='store_true', help='do not actually change any files') subparser.add_argument('-e', '--exclude', dest='excludes', type=ExcludePattern, action='append', metavar="PATTERN", help='exclude paths matching PATTERN') subparser.add_argument('--exclude-from', dest='exclude_files', type=argparse.FileType('r'), action='append', metavar='EXCLUDEFILE', help='read exclude patterns from EXCLUDEFILE, one per line') subparser.add_argument('--numeric-owner', dest='numeric_owner', action='store_true', default=False, help='only obey numeric user and group identifiers') subparser.add_argument('--strip-components', dest='strip_components', type=int, default=0, metavar='NUMBER', help='Remove the specified number of leading path elements. Pathnames with fewer elements will be silently skipped.') subparser.add_argument('archive', metavar='ARCHIVE', type=location_validator(archive=True), help='archive to extract') subparser.add_argument('paths', metavar='PATH', nargs='*', type=str, help='paths to extract') delete_epilog = textwrap.dedent(""" This command deletes an archive from the repository. Any disk space not shared with any other existing archive is also reclaimed. """) subparser = subparsers.add_parser('delete', parents=[common_parser], description=self.do_delete.__doc__, epilog=delete_epilog, formatter_class=argparse.RawDescriptionHelpFormatter) subparser.set_defaults(func=self.do_delete) subparser.add_argument('-s', '--stats', dest='stats', action='store_true', default=False, help='print statistics for the deleted archive') subparser.add_argument('archive', metavar='ARCHIVE', type=location_validator(archive=True), help='archive to delete') list_epilog = textwrap.dedent(""" This command lists the contents of a repository or an archive. """) subparser = subparsers.add_parser('list', parents=[common_parser], description=self.do_list.__doc__, epilog=list_epilog, formatter_class=argparse.RawDescriptionHelpFormatter) subparser.set_defaults(func=self.do_list) subparser.add_argument('src', metavar='REPOSITORY_OR_ARCHIVE', type=location_validator(), help='repository/archive to list contents of') mount_epilog = textwrap.dedent(""" This command mounts an archive as a FUSE filesystem. This can be useful for browsing an archive or restoring individual files. Unless the ``--foreground`` option is given the command will run in the background until the filesystem is ``umounted``. """) subparser = subparsers.add_parser('mount', parents=[common_parser], description=self.do_mount.__doc__, epilog=mount_epilog, formatter_class=argparse.RawDescriptionHelpFormatter) subparser.set_defaults(func=self.do_mount) subparser.add_argument('src', metavar='REPOSITORY_OR_ARCHIVE', type=location_validator(), help='repository/archive to mount') subparser.add_argument('mountpoint', metavar='MOUNTPOINT', type=str, help='where to mount filesystem') subparser.add_argument('-f', '--foreground', dest='foreground', action='store_true', default=False, help='stay in foreground, do not daemonize') subparser.add_argument('-o', dest='options', type=str, help='Extra mount options') info_epilog = textwrap.dedent(""" This command displays some detailed information about the specified archive. 
""") subparser = subparsers.add_parser('info', parents=[common_parser], description=self.do_info.__doc__, epilog=info_epilog, formatter_class=argparse.RawDescriptionHelpFormatter) subparser.set_defaults(func=self.do_info) subparser.add_argument('archive', metavar='ARCHIVE', type=location_validator(archive=True), help='archive to display information about') prune_epilog = textwrap.dedent(""" The prune command prunes a repository by deleting archives not matching any of the specified retention options. This command is normally used by automated backup scripts wanting to keep a certain number of historic backups. As an example, "-d 7" means to keep the latest backup on each day for 7 days. Days without backups do not count towards the total. The rules are applied from hourly to yearly, and backups selected by previous rules do not count towards those of later rules. The time that each backup completes is used for pruning purposes. Dates and times are interpreted in the local timezone, and weeks go from Monday to Sunday. Specifying a negative number of archives to keep means that there is no limit. The "--keep-within" option takes an argument of the form "<int><char>", where char is "H", "d", "w", "m", "y". For example, "--keep-within 2d" means to keep all archives that were created within the past 48 hours. "1m" is taken to mean "31d". The archives kept with this option do not count towards the totals specified by any other options. If a prefix is set with -p, then only archives that start with the prefix are considered for deletion and only those archives count towards the totals specified by the rules. """) subparser = subparsers.add_parser('prune', parents=[common_parser], description=self.do_prune.__doc__, epilog=prune_epilog, formatter_class=argparse.RawDescriptionHelpFormatter) subparser.set_defaults(func=self.do_prune) subparser.add_argument('-n', '--dry-run', dest='dry_run', default=False, action='store_true', help='do not change repository') subparser.add_argument('-s', '--stats', dest='stats', action='store_true', default=False, help='print statistics for the deleted archive') subparser.add_argument('--keep-within', dest='within', type=str, metavar='WITHIN', help='keep all archives within this time interval') subparser.add_argument('-H', '--keep-hourly', dest='hourly', type=int, default=0, help='number of hourly archives to keep') subparser.add_argument('-d', '--keep-daily', dest='daily', type=int, default=0, help='number of daily archives to keep') subparser.add_argument('-w', '--keep-weekly', dest='weekly', type=int, default=0, help='number of weekly archives to keep') subparser.add_argument('-m', '--keep-monthly', dest='monthly', type=int, default=0, help='number of monthly archives to keep') subparser.add_argument('-y', '--keep-yearly', dest='yearly', type=int, default=0, help='number of yearly archives to keep') subparser.add_argument('-p', '--prefix', dest='prefix', type=str, help='only consider archive names starting with this prefix') subparser.add_argument('repository', metavar='REPOSITORY', type=location_validator(archive=False), help='repository to prune') subparser = subparsers.add_parser('help', parents=[common_parser], description='Extra help') subparser.add_argument('--epilog-only', dest='epilog_only', action='store_true', default=False) subparser.add_argument('--usage-only', dest='usage_only', action='store_true', default=False) subparser.set_defaults(func=functools.partial(self.do_help, parser, subparsers.choices)) subparser.add_argument('topic', metavar='TOPIC', 
type=str, nargs='?', help='additional help on TOPIC') args = parser.parse_args(args or ['-h']) self.verbose = args.verbose update_excludes(args) return args.func(args) def main(): # Make sure stdout and stderr have errors='replace' to avoid unicode # issues when print()-ing unicode file names sys.stdout = io.TextIOWrapper(sys.stdout.buffer, sys.stdout.encoding, 'replace', line_buffering=True) sys.stderr = io.TextIOWrapper(sys.stderr.buffer, sys.stderr.encoding, 'replace', line_buffering=True) archiver = Archiver() try: exit_code = archiver.run(sys.argv[1:]) except Error as e: archiver.print_error(e.get_message()) exit_code = e.exit_code except KeyboardInterrupt: archiver.print_error('Error: Keyboard interrupt') exit_code = 1 else: if exit_code: archiver.print_error('Exiting with failure status due to previous errors') sys.exit(exit_code) if __name__ == '__main__': main()
./CrossVul/dataset_final_sorted/CWE-264/py/good_1622_1
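The main() entry point wires the argparse subcommands above to the Archiver methods, so the same class backs both the command line and programmatic use (Archiver().run(['list', 'repo.attic']) goes through the same path as the CLI). A typical session built only from the subcommands and options defined above, with hypothetical repository paths:

# Hypothetical attic session; the paths and archive name are examples only.
#   $ attic init --encryption=keyfile /mnt/backup/repo.attic
#   $ attic create --stats /mnt/backup/repo.attic::monday ~/work
#   $ attic list /mnt/backup/repo.attic
#   $ attic prune -v --keep-daily=7 --keep-weekly=4 /mnt/backup/repo.attic
#   $ attic check --repository-only /mnt/backup/repo.attic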
crossvul-python_data_good_3695_3
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import datetime from keystone.common import sql from keystone import exception from keystone import token class TokenModel(sql.ModelBase, sql.DictBase): __tablename__ = 'token' id = sql.Column(sql.String(64), primary_key=True) expires = sql.Column(sql.DateTime(), default=None) extra = sql.Column(sql.JsonBlob()) @classmethod def from_dict(cls, token_dict): # shove any non-indexed properties into extra extra = copy.deepcopy(token_dict) data = {} for k in ('id', 'expires'): data[k] = extra.pop(k, None) data['extra'] = extra return cls(**data) def to_dict(self): out = copy.deepcopy(self.extra) out['id'] = self.id out['expires'] = self.expires return out class Token(sql.Base, token.Driver): # Public interface def get_token(self, token_id): session = self.get_session() token_ref = session.query(TokenModel).filter_by(id=token_id).first() now = datetime.datetime.utcnow() if token_ref and (not token_ref.expires or now < token_ref.expires): return token_ref.to_dict() else: raise exception.TokenNotFound(token_id=token_id) def create_token(self, token_id, data): data_copy = copy.deepcopy(data) if 'expires' not in data_copy: data_copy['expires'] = self._get_default_expire_time() token_ref = TokenModel.from_dict(data_copy) token_ref.id = token_id session = self.get_session() with session.begin(): session.add(token_ref) session.flush() return token_ref.to_dict() def delete_token(self, token_id): session = self.get_session() token_ref = session.query(TokenModel)\ .filter_by(id=token_id)\ .first() if not token_ref: raise exception.TokenNotFound(token_id=token_id) with session.begin(): session.delete(token_ref) session.flush() def list_tokens(self, user_id): session = self.get_session() tokens = [] now = datetime.datetime.utcnow() for token_ref in session.query(TokenModel)\ .filter(TokenModel.expires > now): token_ref_dict = token_ref.to_dict() if 'user' not in token_ref_dict: continue if token_ref_dict['user'].get('id') != user_id: continue tokens.append(token_ref['id']) return tokens
./CrossVul/dataset_final_sorted/CWE-264/py/good_3695_3
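get_token() above treats a NULL expires column as a token that never expires; otherwise the token is valid only while datetime.utcnow() is still before the stored timestamp. A standalone sketch of that validity rule, using plain dicts instead of the sqlalchemy model:

import datetime

def token_is_valid(token_ref):
    # Mirrors the driver's rule: no expiry means valid forever, otherwise
    # the current time must still be before the expiry timestamp.
    expires = token_ref.get('expires')
    return expires is None or datetime.datetime.utcnow() < expires

# Hypothetical token records for illustration.
assert token_is_valid({'id': 'abc', 'expires': None})
assert not token_is_valid({
    'id': 'abc',
    'expires': datetime.datetime.utcnow() - datetime.timedelta(hours=1)})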
crossvul-python_data_good_3632_2
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Defines interface for DB access. The underlying driver is loaded as a :class:`LazyPluggable`. Functions in this module are imported into the nova.db namespace. Call these functions from nova.db namespace, not the nova.db.api namespace. All functions in this module return objects that implement a dictionary-like interface. Currently, many of these objects are sqlalchemy objects that implement a dictionary interface. However, a future goal is to have all of these objects be simple dictionaries. **Related Flags** :db_backend: string to lookup in the list of LazyPluggable backends. `sqlalchemy` is the only supported backend right now. :sql_connection: string specifying the sqlalchemy connection to use, like: `sqlite:///var/lib/nova/nova.sqlite`. :enable_new_services: when adding a new service to the database, is it in the pool of available hardware (Default: True) """ from nova import exception from nova import flags from nova.openstack.common import cfg from nova import utils db_opts = [ cfg.StrOpt('db_backend', default='sqlalchemy', help='The backend to use for db'), cfg.BoolOpt('enable_new_services', default=True, help='Services to be added to the available pool on create'), cfg.StrOpt('instance_name_template', default='instance-%08x', help='Template string to be used to generate instance names'), cfg.StrOpt('volume_name_template', default='volume-%08x', help='Template string to be used to generate volume names'), cfg.StrOpt('snapshot_name_template', default='snapshot-%08x', help='Template string to be used to generate snapshot names'), ] FLAGS = flags.FLAGS FLAGS.register_opts(db_opts) IMPL = utils.LazyPluggable('db_backend', sqlalchemy='nova.db.sqlalchemy.api') class NoMoreNetworks(exception.Error): """No more available networks.""" pass class NoMoreTargets(exception.Error): """No more available targets""" pass ################### def service_destroy(context, service_id): """Destroy the service or raise if it does not exist.""" return IMPL.service_destroy(context, service_id) def service_get(context, service_id): """Get a service or raise if it does not exist.""" return IMPL.service_get(context, service_id) def service_get_by_host_and_topic(context, host, topic): """Get a service by host it's on and topic it listens to.""" return IMPL.service_get_by_host_and_topic(context, host, topic) def service_get_all(context, disabled=None): """Get all services.""" return IMPL.service_get_all(context, disabled) def service_get_all_by_topic(context, topic): """Get all services for a given topic.""" return IMPL.service_get_all_by_topic(context, topic) def service_get_all_by_host(context, host): """Get all services for a given host.""" return IMPL.service_get_all_by_host(context, host) def
service_get_all_compute_by_host(context, host): """Get all compute services for a given host.""" return IMPL.service_get_all_compute_by_host(context, host) def service_get_all_compute_sorted(context): """Get all compute services sorted by instance count. :returns: a list of (Service, instance_count) tuples. """ return IMPL.service_get_all_compute_sorted(context) def service_get_all_volume_sorted(context): """Get all volume services sorted by volume count. :returns: a list of (Service, volume_count) tuples. """ return IMPL.service_get_all_volume_sorted(context) def service_get_by_args(context, host, binary): """Get the state of a service by node name and binary.""" return IMPL.service_get_by_args(context, host, binary) def service_create(context, values): """Create a service from the values dictionary.""" return IMPL.service_create(context, values) def service_update(context, service_id, values): """Set the given properties on a service and update it. Raises NotFound if service does not exist. """ return IMPL.service_update(context, service_id, values) ################### def compute_node_get(context, compute_id): """Get a computeNode or raise if it does not exist.""" return IMPL.compute_node_get(context, compute_id) def compute_node_get_all(context): """Get all computeNodes.""" return IMPL.compute_node_get_all(context) def compute_node_create(context, values): """Create a computeNode from the values dictionary.""" return IMPL.compute_node_create(context, values) def compute_node_update(context, compute_id, values, auto_adjust=True): """Set the given properties on a computeNode and update it. Raises NotFound if computeNode does not exist. """ return IMPL.compute_node_update(context, compute_id, values, auto_adjust) def compute_node_get_by_host(context, host): return IMPL.compute_node_get_by_host(context, host) def compute_node_utilization_update(context, host, free_ram_mb_delta=0, free_disk_gb_delta=0, work_delta=0, vm_delta=0): return IMPL.compute_node_utilization_update(context, host, free_ram_mb_delta, free_disk_gb_delta, work_delta, vm_delta) def compute_node_utilization_set(context, host, free_ram_mb=None, free_disk_gb=None, work=None, vms=None): return IMPL.compute_node_utilization_set(context, host, free_ram_mb, free_disk_gb, work, vms) ################### def certificate_create(context, values): """Create a certificate from the values dictionary.""" return IMPL.certificate_create(context, values) def certificate_get_all_by_project(context, project_id): """Get all certificates for a project.""" return IMPL.certificate_get_all_by_project(context, project_id) def certificate_get_all_by_user(context, user_id): """Get all certificates for a user.""" return IMPL.certificate_get_all_by_user(context, user_id) def certificate_get_all_by_user_and_project(context, user_id, project_id): """Get all certificates for a user and project.""" return IMPL.certificate_get_all_by_user_and_project(context, user_id, project_id) ################### def floating_ip_get(context, id): """Get a floating ip by id.""" return IMPL.floating_ip_get(context, id) def floating_ip_get_pools(context): """Returns a list of floating ip pools""" return IMPL.floating_ip_get_pools(context) def floating_ip_allocate_address(context, project_id, pool): """Allocate free floating ip from specified pool and return the address. Raises if one is not available.
""" return IMPL.floating_ip_allocate_address(context, project_id, pool) def floating_ip_create(context, values): """Create a floating ip from the values dictionary.""" return IMPL.floating_ip_create(context, values) def floating_ip_count_by_project(context, project_id): """Count floating ips used by project.""" return IMPL.floating_ip_count_by_project(context, project_id) def floating_ip_deallocate(context, address): """Deallocate an floating ip by address.""" return IMPL.floating_ip_deallocate(context, address) def floating_ip_destroy(context, address): """Destroy the floating_ip or raise if it does not exist.""" return IMPL.floating_ip_destroy(context, address) def floating_ip_disassociate(context, address): """Disassociate an floating ip from a fixed ip by address. :returns: the address of the existing fixed ip. """ return IMPL.floating_ip_disassociate(context, address) def floating_ip_fixed_ip_associate(context, floating_address, fixed_address, host): """Associate an floating ip to a fixed_ip by address.""" return IMPL.floating_ip_fixed_ip_associate(context, floating_address, fixed_address, host) def floating_ip_get_all(context): """Get all floating ips.""" return IMPL.floating_ip_get_all(context) def floating_ip_get_all_by_host(context, host): """Get all floating ips by host.""" return IMPL.floating_ip_get_all_by_host(context, host) def floating_ip_get_all_by_project(context, project_id): """Get all floating ips by project.""" return IMPL.floating_ip_get_all_by_project(context, project_id) def floating_ip_get_by_address(context, address): """Get a floating ip by address or raise if it doesn't exist.""" return IMPL.floating_ip_get_by_address(context, address) def floating_ip_get_by_fixed_address(context, fixed_address): """Get a floating ips by fixed address""" return IMPL.floating_ip_get_by_fixed_address(context, fixed_address) def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id): """Get a floating ips by fixed address""" return IMPL.floating_ip_get_by_fixed_ip_id(context, fixed_ip_id) def floating_ip_update(context, address, values): """Update a floating ip by address or raise if it doesn't exist.""" return IMPL.floating_ip_update(context, address, values) def floating_ip_set_auto_assigned(context, address): """Set auto_assigned flag to floating ip""" return IMPL.floating_ip_set_auto_assigned(context, address) def dnsdomain_list(context): """Get a list of all zones in our database, public and private.""" return IMPL.dnsdomain_list(context) def dnsdomain_register_for_zone(context, fqdomain, zone): """Associated a DNS domain with an availability zone""" return IMPL.dnsdomain_register_for_zone(context, fqdomain, zone) def dnsdomain_register_for_project(context, fqdomain, project): """Associated a DNS domain with a project id""" return IMPL.dnsdomain_register_for_project(context, fqdomain, project) def dnsdomain_unregister(context, fqdomain): """Purge associations for the specified DNS zone""" return IMPL.dnsdomain_unregister(context, fqdomain) def dnsdomain_get(context, fqdomain): """Get the db record for the specified domain.""" return IMPL.dnsdomain_get(context, fqdomain) #################### def migration_update(context, id, values): """Update a migration instance.""" return IMPL.migration_update(context, id, values) def migration_create(context, values): """Create a migration record.""" return IMPL.migration_create(context, values) def migration_get(context, migration_id): """Finds a migration by the id.""" return IMPL.migration_get(context, migration_id) def 
migration_get_by_instance_and_status(context, instance_uuid, status): """Finds a migration by the uuid of the instance it is migrating.""" return IMPL.migration_get_by_instance_and_status(context, instance_uuid, status) def migration_get_all_unconfirmed(context, confirm_window): """Finds all unconfirmed migrations within the confirmation window.""" return IMPL.migration_get_all_unconfirmed(context, confirm_window) #################### def fixed_ip_associate(context, address, instance_id, network_id=None, reserved=False): """Associate fixed ip to instance. Raises if fixed ip is not available. """ return IMPL.fixed_ip_associate(context, address, instance_id, network_id, reserved) def fixed_ip_associate_pool(context, network_id, instance_id=None, host=None): """Find free ip in network and associate it to instance or host. Raises if one is not available. """ return IMPL.fixed_ip_associate_pool(context, network_id, instance_id, host) def fixed_ip_create(context, values): """Create a fixed ip from the values dictionary.""" return IMPL.fixed_ip_create(context, values) def fixed_ip_bulk_create(context, ips): """Create a lot of fixed ips from the values dictionary.""" return IMPL.fixed_ip_bulk_create(context, ips) def fixed_ip_disassociate(context, address): """Disassociate a fixed ip from an instance by address.""" return IMPL.fixed_ip_disassociate(context, address) def fixed_ip_disassociate_all_by_timeout(context, host, time): """Disassociate old fixed ips from host.""" return IMPL.fixed_ip_disassociate_all_by_timeout(context, host, time) def fixed_ip_get(context, id): """Get fixed ip by id or raise if it does not exist.""" return IMPL.fixed_ip_get(context, id) def fixed_ip_get_all(context): """Get all defined fixed ips.""" return IMPL.fixed_ip_get_all(context) def fixed_ip_get_by_address(context, address): """Get a fixed ip by address or raise if it does not exist.""" return IMPL.fixed_ip_get_by_address(context, address) def fixed_ip_get_by_instance(context, instance_id): """Get fixed ips by instance or raise if none exist.""" return IMPL.fixed_ip_get_by_instance(context, instance_id) def fixed_ip_get_by_network_host(context, network_id, host): """Get fixed ip for a host in a network.""" return IMPL.fixed_ip_get_by_network_host(context, network_id, host) def fixed_ips_by_virtual_interface(context, vif_id): """Get fixed ips by virtual interface or raise if none exist.""" return IMPL.fixed_ips_by_virtual_interface(context, vif_id) def fixed_ip_get_network(context, address): """Get a network for a fixed ip by address.""" return IMPL.fixed_ip_get_network(context, address) def fixed_ip_update(context, address, values): """Update a fixed ip by address with the values dictionary.""" return IMPL.fixed_ip_update(context, address, values) #################### def virtual_interface_create(context, values): """Create a virtual interface record in the database.""" return IMPL.virtual_interface_create(context, values) def virtual_interface_get(context, vif_id): """Gets a virtual interface from the table.""" return IMPL.virtual_interface_get(context, vif_id) def virtual_interface_get_by_address(context, address): """Gets a virtual interface from the table filtering on address.""" return IMPL.virtual_interface_get_by_address(context, address) def virtual_interface_get_by_uuid(context, vif_uuid): """Gets a virtual interface from the table filtering on vif uuid.""" return IMPL.virtual_interface_get_by_uuid(context, vif_uuid) def virtual_interface_get_by_instance(context, instance_id): """Gets all virtual_interfaces for instance."""
return IMPL.virtual_interface_get_by_instance(context, instance_id) def virtual_interface_get_by_instance_and_network(context, instance_id, network_id): """Gets the virtual interface for an instance on a given network.""" return IMPL.virtual_interface_get_by_instance_and_network(context, instance_id, network_id) def virtual_interface_delete(context, vif_id): """Delete virtual interface record from the database.""" return IMPL.virtual_interface_delete(context, vif_id) def virtual_interface_delete_by_instance(context, instance_id): """Delete virtual interface records associated with instance.""" return IMPL.virtual_interface_delete_by_instance(context, instance_id) def virtual_interface_get_all(context): """Gets all virtual interfaces from the table.""" return IMPL.virtual_interface_get_all(context) #################### def instance_create(context, values): """Create an instance from the values dictionary.""" return IMPL.instance_create(context, values) def instance_data_get_for_project(context, project_id): """Get (instance_count, total_cores, total_ram) for project.""" return IMPL.instance_data_get_for_project(context, project_id) def instance_destroy(context, instance_id): """Destroy the instance or raise if it does not exist.""" return IMPL.instance_destroy(context, instance_id) def instance_get_by_uuid(context, uuid): """Get an instance or raise if it does not exist.""" return IMPL.instance_get_by_uuid(context, uuid) def instance_get(context, instance_id): """Get an instance or raise if it does not exist.""" return IMPL.instance_get(context, instance_id) def instance_get_all(context): """Get all instances.""" return IMPL.instance_get_all(context) def instance_get_all_by_filters(context, filters, sort_key='created_at', sort_dir='desc'): """Get all instances that match all filters.""" return IMPL.instance_get_all_by_filters(context, filters, sort_key, sort_dir) def instance_get_active_by_window(context, begin, end=None, project_id=None): """Get instances active during a certain time window. Specifying a project_id will filter for a certain project.""" return IMPL.instance_get_active_by_window(context, begin, end, project_id) def instance_get_active_by_window_joined(context, begin, end=None, project_id=None): """Get instances and joins active during a certain time window. Specifying a project_id will filter for a certain project.""" return IMPL.instance_get_active_by_window_joined(context, begin, end, project_id) def instance_get_all_by_project(context, project_id): """Get all instances belonging to a project.""" return IMPL.instance_get_all_by_project(context, project_id) def instance_get_all_by_host(context, host): """Get all instances belonging to a host.""" return IMPL.instance_get_all_by_host(context, host) def instance_get_all_by_reservation(context, reservation_id): """Get all instances belonging to a reservation.""" return IMPL.instance_get_all_by_reservation(context, reservation_id) def instance_get_floating_address(context, instance_id): """Get the first floating ip address of an instance.""" return IMPL.instance_get_floating_address(context, instance_id) def instance_get_all_hung_in_rebooting(context, reboot_window): """Get all instances stuck in a rebooting state.""" return IMPL.instance_get_all_hung_in_rebooting(context, reboot_window) def instance_test_and_set(context, instance_id, attr, ok_states, new_state): """Atomically check if an instance is in a valid state, and if it is, set the instance into a new state.
""" return IMPL.instance_test_and_set( context, instance_id, attr, ok_states, new_state) def instance_update(context, instance_id, values): """Set the given properties on an instance and update it. Raises NotFound if instance does not exist. """ return IMPL.instance_update(context, instance_id, values) def instance_add_security_group(context, instance_id, security_group_id): """Associate the given security group with the given instance.""" return IMPL.instance_add_security_group(context, instance_id, security_group_id) def instance_remove_security_group(context, instance_id, security_group_id): """Disassociate the given security group from the given instance.""" return IMPL.instance_remove_security_group(context, instance_id, security_group_id) def instance_action_create(context, values): """Create an instance action from the values dictionary.""" return IMPL.instance_action_create(context, values) def instance_get_actions(context, instance_uuid): """Get instance actions by instance uuid.""" return IMPL.instance_get_actions(context, instance_uuid) def instance_get_id_to_uuid_mapping(context, ids): """Return a dictionary containing 'ID: UUID' given the ids""" return IMPL.instance_get_id_to_uuid_mapping(context, ids) ################### def instance_info_cache_create(context, values): """Create a new instance cache record in the table. :param context: = request context object :param values: = dict containing column values """ return IMPL.instance_info_cache_create(context, values) def instance_info_cache_get(context, instance_uuid): """Gets an instance info cache from the table. :param instance_uuid: = uuid of the info cache's instance """ return IMPL.instance_info_cache_get(context, instance_uuid) def instance_info_cache_update(context, instance_uuid, values): """Update an instance info cache record in the table. :param instance_uuid: = uuid of info cache's instance :param values: = dict containing column values to update """ return IMPL.instance_info_cache_update(context, instance_uuid, values) def instance_info_cache_delete(context, instance_uuid): """Deletes an existing instance_info_cache record :param instance_uuid: = uuid of the instance tied to the cache record """ return IMPL.instance_info_cache_delete(context, instance_uuid) ################### def key_pair_create(context, values): """Create a key_pair from the values dictionary.""" return IMPL.key_pair_create(context, values) def key_pair_destroy(context, user_id, name): """Destroy the key_pair or raise if it does not exist.""" return IMPL.key_pair_destroy(context, user_id, name) def key_pair_destroy_all_by_user(context, user_id): """Destroy all key_pairs by user.""" return IMPL.key_pair_destroy_all_by_user(context, user_id) def key_pair_get(context, user_id, name): """Get a key_pair or raise if it does not exist.""" return IMPL.key_pair_get(context, user_id, name) def key_pair_get_all_by_user(context, user_id): """Get all key_pairs by user.""" return IMPL.key_pair_get_all_by_user(context, user_id) #################### def network_associate(context, project_id, force=False): """Associate a free network to a project.""" return IMPL.network_associate(context, project_id, force) def network_count(context): """Return the number of networks.""" return IMPL.network_count(context) def network_count_reserved_ips(context, network_id): """Return the number of reserved ips in the network.""" return IMPL.network_count_reserved_ips(context, network_id) def network_create_safe(context, values): """Create a network from the values dict. 
The network is only returned if the create succeeds. If the create violates constraints because the network already exists, no exception is raised. """ return IMPL.network_create_safe(context, values) def network_delete_safe(context, network_id): """Delete network with key network_id. This method assumes that the network is not associated with any project """ return IMPL.network_delete_safe(context, network_id) def network_create_fixed_ips(context, network_id, num_vpn_clients): """Create the ips for the network, reserving specified ips.""" return IMPL.network_create_fixed_ips(context, network_id, num_vpn_clients) def network_disassociate(context, network_id): """Disassociate the network from its project or raise if it does not exist.""" return IMPL.network_disassociate(context, network_id) def network_get(context, network_id): """Get a network or raise if it does not exist.""" return IMPL.network_get(context, network_id) def network_get_all(context): """Return all defined networks.""" return IMPL.network_get_all(context) def network_get_all_by_uuids(context, network_uuids, project_id=None): """Return networks by ids.""" return IMPL.network_get_all_by_uuids(context, network_uuids, project_id) # pylint: disable=C0103 def network_get_associated_fixed_ips(context, network_id, host=None): """Get all network's ips that have been associated.""" return IMPL.network_get_associated_fixed_ips(context, network_id, host) def network_get_by_bridge(context, bridge): """Get a network by bridge or raise if it does not exist.""" return IMPL.network_get_by_bridge(context, bridge) def network_get_by_uuid(context, uuid): """Get a network by uuid or raise if it does not exist.""" return IMPL.network_get_by_uuid(context, uuid) def network_get_by_cidr(context, cidr): """Get a network by cidr or raise if it does not exist""" return IMPL.network_get_by_cidr(context, cidr) def network_get_by_instance(context, instance_id): """Get a network by instance id or raise if it does not exist.""" return IMPL.network_get_by_instance(context, instance_id) def network_get_all_by_instance(context, instance_id): """Get all networks by instance id or raise if none exist.""" return IMPL.network_get_all_by_instance(context, instance_id) def network_get_all_by_host(context, host): """All networks for which the given host is the network host.""" return IMPL.network_get_all_by_host(context, host) def network_get_index(context, network_id): """Get non-conflicting index for network.""" return IMPL.network_get_index(context, network_id) def network_set_cidr(context, network_id, cidr): """Set the Classless Inter-Domain Routing (CIDR) for the network.""" return IMPL.network_set_cidr(context, network_id, cidr) def network_set_host(context, network_id, host_id): """Safely set the host for network.""" return IMPL.network_set_host(context, network_id, host_id) def network_update(context, network_id, values): """Set the given properties on a network and update it. Raises NotFound if network does not exist. """ return IMPL.network_update(context, network_id, values) ################### def queue_get_for(context, topic, physical_node_id): """Return a channel to send a message to a node with a topic.""" return IMPL.queue_get_for(context, topic, physical_node_id) ################### def iscsi_target_count_by_host(context, host): """Return count of export devices.""" return IMPL.iscsi_target_count_by_host(context, host) def iscsi_target_create_safe(context, values): """Create an iscsi_target from the values dictionary. The device is not returned.
If the create violates the unique constraints because the iscsi_target and host already exist, no exception is raised. """ return IMPL.iscsi_target_create_safe(context, values) ############### def auth_token_destroy(context, token_id): """Destroy an auth token.""" return IMPL.auth_token_destroy(context, token_id) def auth_token_get(context, token_hash): """Retrieves a token given the hash representing it.""" return IMPL.auth_token_get(context, token_hash) def auth_token_update(context, token_hash, values): """Updates a token given the hash representing it.""" return IMPL.auth_token_update(context, token_hash, values) def auth_token_create(context, token): """Creates a new token.""" return IMPL.auth_token_create(context, token) ################### def quota_create(context, project_id, resource, limit): """Create a quota for the given project and resource.""" return IMPL.quota_create(context, project_id, resource, limit) def quota_get(context, project_id, resource): """Retrieve a quota or raise if it does not exist.""" return IMPL.quota_get(context, project_id, resource) def quota_get_all_by_project(context, project_id): """Retrieve all quotas associated with a given project.""" return IMPL.quota_get_all_by_project(context, project_id) def quota_update(context, project_id, resource, limit): """Update a quota or raise if it does not exist.""" return IMPL.quota_update(context, project_id, resource, limit) def quota_destroy(context, project_id, resource): """Destroy the quota or raise if it does not exist.""" return IMPL.quota_destroy(context, project_id, resource) def quota_destroy_all_by_project(context, project_id): """Destroy all quotas associated with a given project.""" return IMPL.quota_destroy_all_by_project(context, project_id) ################### def quota_class_create(context, class_name, resource, limit): """Create a quota class for the given name and resource.""" return IMPL.quota_class_create(context, class_name, resource, limit) def quota_class_get(context, class_name, resource): """Retrieve a quota class or raise if it does not exist.""" return IMPL.quota_class_get(context, class_name, resource) def quota_class_get_all_by_name(context, class_name): """Retrieve all quotas associated with a given quota class.""" return IMPL.quota_class_get_all_by_name(context, class_name) def quota_class_update(context, class_name, resource, limit): """Update a quota class or raise if it does not exist.""" return IMPL.quota_class_update(context, class_name, resource, limit) def quota_class_destroy(context, class_name, resource): """Destroy the quota class or raise if it does not exist.""" return IMPL.quota_class_destroy(context, class_name, resource) def quota_class_destroy_all_by_name(context, class_name): """Destroy all quotas associated with a given quota class.""" return IMPL.quota_class_destroy_all_by_name(context, class_name) ################### def volume_allocate_iscsi_target(context, volume_id, host): """Atomically allocate a free iscsi_target from the pool.""" return IMPL.volume_allocate_iscsi_target(context, volume_id, host) def volume_attached(context, volume_id, instance_id, mountpoint): """Ensure that a volume is set as attached.""" return IMPL.volume_attached(context, volume_id, instance_id, mountpoint) def volume_create(context, values): """Create a volume from the values dictionary.""" return IMPL.volume_create(context, values) def volume_data_get_for_project(context, project_id): """Get (volume_count, gigabytes) for project.""" return IMPL.volume_data_get_for_project(context, 
project_id) def volume_destroy(context, volume_id): """Destroy the volume or raise if it does not exist.""" return IMPL.volume_destroy(context, volume_id) def volume_detached(context, volume_id): """Ensure that a volume is set as detached.""" return IMPL.volume_detached(context, volume_id) def volume_get(context, volume_id): """Get a volume or raise if it does not exist.""" return IMPL.volume_get(context, volume_id) def volume_get_all(context): """Get all volumes.""" return IMPL.volume_get_all(context) def volume_get_all_by_host(context, host): """Get all volumes belonging to a host.""" return IMPL.volume_get_all_by_host(context, host) def volume_get_all_by_instance(context, instance_id): """Get all volumes belonging to an instance.""" return IMPL.volume_get_all_by_instance(context, instance_id) def volume_get_all_by_project(context, project_id): """Get all volumes belonging to a project.""" return IMPL.volume_get_all_by_project(context, project_id) def volume_get_by_ec2_id(context, ec2_id): """Get a volume by ec2 id.""" return IMPL.volume_get_by_ec2_id(context, ec2_id) def volume_get_instance(context, volume_id): """Get the instance that a volume is attached to.""" return IMPL.volume_get_instance(context, volume_id) def volume_get_iscsi_target_num(context, volume_id): """Get the target num (tid) allocated to the volume.""" return IMPL.volume_get_iscsi_target_num(context, volume_id) def volume_update(context, volume_id, values): """Set the given properties on a volume and update it. Raises NotFound if volume does not exist. """ return IMPL.volume_update(context, volume_id, values) #################### def snapshot_create(context, values): """Create a snapshot from the values dictionary.""" return IMPL.snapshot_create(context, values) def snapshot_destroy(context, snapshot_id): """Destroy the snapshot or raise if it does not exist.""" return IMPL.snapshot_destroy(context, snapshot_id) def snapshot_get(context, snapshot_id): """Get a snapshot or raise if it does not exist.""" return IMPL.snapshot_get(context, snapshot_id) def snapshot_get_all(context): """Get all snapshots.""" return IMPL.snapshot_get_all(context) def snapshot_get_all_by_project(context, project_id): """Get all snapshots belonging to a project.""" return IMPL.snapshot_get_all_by_project(context, project_id) def snapshot_get_all_for_volume(context, volume_id): """Get all snapshots for a volume.""" return IMPL.snapshot_get_all_for_volume(context, volume_id) def snapshot_update(context, snapshot_id, values): """Set the given properties on a snapshot and update it. Raises NotFound if snapshot does not exist. """ return IMPL.snapshot_update(context, snapshot_id, values) #################### def block_device_mapping_create(context, values): """Create an entry of block device mapping""" return IMPL.block_device_mapping_create(context, values) def block_device_mapping_update(context, bdm_id, values): """Update an entry of block device mapping""" return IMPL.block_device_mapping_update(context, bdm_id, values) def block_device_mapping_update_or_create(context, values): """Update an entry of block device mapping. 
If one does not exist, create a new entry.""" return IMPL.block_device_mapping_update_or_create(context, values) def block_device_mapping_get_all_by_instance(context, instance_id): """Get all block device mapping belonging to an instance""" return IMPL.block_device_mapping_get_all_by_instance(context, instance_id) def block_device_mapping_destroy(context, bdm_id): """Destroy the block device mapping.""" return IMPL.block_device_mapping_destroy(context, bdm_id) def block_device_mapping_destroy_by_instance_and_volume(context, instance_id, volume_id): """Destroy the block device mapping or raise if it does not exist.""" return IMPL.block_device_mapping_destroy_by_instance_and_volume( context, instance_id, volume_id) #################### def security_group_get_all(context): """Get all security groups.""" return IMPL.security_group_get_all(context) def security_group_get(context, security_group_id): """Get security group by its id.""" return IMPL.security_group_get(context, security_group_id) def security_group_get_by_name(context, project_id, group_name): """Returns a security group with the specified name from a project.""" return IMPL.security_group_get_by_name(context, project_id, group_name) def security_group_get_by_project(context, project_id): """Get all security groups belonging to a project.""" return IMPL.security_group_get_by_project(context, project_id) def security_group_get_by_instance(context, instance_id): """Get security groups to which the instance is assigned.""" return IMPL.security_group_get_by_instance(context, instance_id) def security_group_exists(context, project_id, group_name): """Indicates if a group name exists in a project.""" return IMPL.security_group_exists(context, project_id, group_name) def security_group_in_use(context, group_id): """Indicates if a security group is currently in use.""" return IMPL.security_group_in_use(context, group_id) def security_group_create(context, values): """Create a new security group.""" return IMPL.security_group_create(context, values) def security_group_destroy(context, security_group_id): """Deletes a security group.""" return IMPL.security_group_destroy(context, security_group_id) def security_group_count_by_project(context, project_id): """Count number of security groups in a project.""" return IMPL.security_group_count_by_project(context, project_id) #################### def security_group_rule_create(context, values): """Create a new security group rule.""" return IMPL.security_group_rule_create(context, values) def security_group_rule_get_by_security_group(context, security_group_id): """Get all rules for a given security group.""" return IMPL.security_group_rule_get_by_security_group(context, security_group_id) def security_group_rule_get_by_security_group_grantee(context, security_group_id): """Get all rules that grant access to the given security group.""" return IMPL.security_group_rule_get_by_security_group_grantee(context, security_group_id) def security_group_rule_destroy(context, security_group_rule_id): """Deletes a security group rule.""" return IMPL.security_group_rule_destroy(context, security_group_rule_id) def security_group_rule_get(context, security_group_rule_id): """Gets a security group rule.""" return IMPL.security_group_rule_get(context, security_group_rule_id) def security_group_rule_count_by_group(context, security_group_id): """Count rules in a given security group.""" return IMPL.security_group_rule_count_by_group(context, security_group_id) ################### def provider_fw_rule_create(context, rule): 
"""Add a firewall rule at the provider level (all hosts & instances).""" return IMPL.provider_fw_rule_create(context, rule) def provider_fw_rule_get_all(context): """Get all provider-level firewall rules.""" return IMPL.provider_fw_rule_get_all(context) def provider_fw_rule_destroy(context, rule_id): """Delete a provider firewall rule from the database.""" return IMPL.provider_fw_rule_destroy(context, rule_id) ################### def user_get(context, id): """Get user by id.""" return IMPL.user_get(context, id) def user_get_by_uid(context, uid): """Get user by uid.""" return IMPL.user_get_by_uid(context, uid) def user_get_by_access_key(context, access_key): """Get user by access key.""" return IMPL.user_get_by_access_key(context, access_key) def user_create(context, values): """Create a new user.""" return IMPL.user_create(context, values) def user_delete(context, id): """Delete a user.""" return IMPL.user_delete(context, id) def user_get_all(context): """Create a new user.""" return IMPL.user_get_all(context) def user_add_role(context, user_id, role): """Add another global role for user.""" return IMPL.user_add_role(context, user_id, role) def user_remove_role(context, user_id, role): """Remove global role from user.""" return IMPL.user_remove_role(context, user_id, role) def user_get_roles(context, user_id): """Get global roles for user.""" return IMPL.user_get_roles(context, user_id) def user_add_project_role(context, user_id, project_id, role): """Add project role for user.""" return IMPL.user_add_project_role(context, user_id, project_id, role) def user_remove_project_role(context, user_id, project_id, role): """Remove project role from user.""" return IMPL.user_remove_project_role(context, user_id, project_id, role) def user_get_roles_for_project(context, user_id, project_id): """Return list of roles a user holds on project.""" return IMPL.user_get_roles_for_project(context, user_id, project_id) def user_update(context, user_id, values): """Update user.""" return IMPL.user_update(context, user_id, values) ################### def project_get(context, id): """Get project by id.""" return IMPL.project_get(context, id) def project_create(context, values): """Create a new project.""" return IMPL.project_create(context, values) def project_add_member(context, project_id, user_id): """Add user to project.""" return IMPL.project_add_member(context, project_id, user_id) def project_get_all(context): """Get all projects.""" return IMPL.project_get_all(context) def project_get_by_user(context, user_id): """Get all projects of which the given user is a member.""" return IMPL.project_get_by_user(context, user_id) def project_remove_member(context, project_id, user_id): """Remove the given user from the given project.""" return IMPL.project_remove_member(context, project_id, user_id) def project_update(context, project_id, values): """Update Remove the given user from the given project.""" return IMPL.project_update(context, project_id, values) def project_delete(context, project_id): """Delete project.""" return IMPL.project_delete(context, project_id) def project_get_networks(context, project_id, associate=True): """Return the network associated with the project. If associate is true, it will attempt to associate a new network if one is not found, otherwise it returns None. 
""" return IMPL.project_get_networks(context, project_id, associate) ################### def console_pool_create(context, values): """Create console pool.""" return IMPL.console_pool_create(context, values) def console_pool_get(context, pool_id): """Get a console pool.""" return IMPL.console_pool_get(context, pool_id) def console_pool_get_by_host_type(context, compute_host, proxy_host, console_type): """Fetch a console pool for a given proxy host, compute host, and type.""" return IMPL.console_pool_get_by_host_type(context, compute_host, proxy_host, console_type) def console_pool_get_all_by_host_type(context, host, console_type): """Fetch all pools for given proxy host and type.""" return IMPL.console_pool_get_all_by_host_type(context, host, console_type) def console_create(context, values): """Create a console.""" return IMPL.console_create(context, values) def console_delete(context, console_id): """Delete a console.""" return IMPL.console_delete(context, console_id) def console_get_by_pool_instance(context, pool_id, instance_id): """Get console entry for a given instance and pool.""" return IMPL.console_get_by_pool_instance(context, pool_id, instance_id) def console_get_all_by_instance(context, instance_id): """Get consoles for a given instance.""" return IMPL.console_get_all_by_instance(context, instance_id) def console_get(context, console_id, instance_id=None): """Get a specific console (possibly on a given instance).""" return IMPL.console_get(context, console_id, instance_id) ################## def instance_type_create(context, values): """Create a new instance type.""" return IMPL.instance_type_create(context, values) def instance_type_get_all(context, inactive=False, filters=None): """Get all instance types.""" return IMPL.instance_type_get_all( context, inactive=inactive, filters=filters) def instance_type_get(context, id): """Get instance type by id.""" return IMPL.instance_type_get(context, id) def instance_type_get_by_name(context, name): """Get instance type by name.""" return IMPL.instance_type_get_by_name(context, name) def instance_type_get_by_flavor_id(context, id): """Get instance type by name.""" return IMPL.instance_type_get_by_flavor_id(context, id) def instance_type_destroy(context, name): """Delete a instance type.""" return IMPL.instance_type_destroy(context, name) #################### def cell_create(context, values): """Create a new child Cell entry.""" return IMPL.cell_create(context, values) def cell_update(context, cell_id, values): """Update a child Cell entry.""" return IMPL.cell_update(context, cell_id, values) def cell_delete(context, cell_id): """Delete a child Cell.""" return IMPL.cell_delete(context, cell_id) def cell_get(context, cell_id): """Get a specific child Cell.""" return IMPL.cell_get(context, cell_id) def cell_get_all(context): """Get all child Cells.""" return IMPL.cell_get_all(context) #################### def instance_metadata_get(context, instance_id): """Get all metadata for an instance.""" return IMPL.instance_metadata_get(context, instance_id) def instance_metadata_delete(context, instance_id, key): """Delete the given metadata item.""" IMPL.instance_metadata_delete(context, instance_id, key) def instance_metadata_update(context, instance_id, metadata, delete): """Update metadata if it exists, otherwise create it.""" IMPL.instance_metadata_update(context, instance_id, metadata, delete) #################### def agent_build_create(context, values): """Create a new agent build entry.""" return IMPL.agent_build_create(context, values) def 
agent_build_get_by_triple(context, hypervisor, os, architecture): """Get agent build by hypervisor/OS/architecture triple.""" return IMPL.agent_build_get_by_triple(context, hypervisor, os, architecture) def agent_build_get_all(context): """Get all agent builds.""" return IMPL.agent_build_get_all(context) def agent_build_destroy(context, agent_update_id): """Destroy agent build entry.""" IMPL.agent_build_destroy(context, agent_update_id) def agent_build_update(context, agent_build_id, values): """Update agent build entry.""" IMPL.agent_build_update(context, agent_build_id, values) #################### def bw_usage_get_by_macs(context, macs, start_period): """Return bw usages for an instance in a given audit period.""" return IMPL.bw_usage_get_by_macs(context, macs, start_period) def bw_usage_update(context, mac, start_period, bw_in, bw_out): """Update cached bw usage for an instance and network. Creates a new record if needed.""" return IMPL.bw_usage_update(context, mac, start_period, bw_in, bw_out) #################### def instance_type_extra_specs_get(context, instance_type_id): """Get all extra specs for an instance type.""" return IMPL.instance_type_extra_specs_get(context, instance_type_id) def instance_type_extra_specs_delete(context, instance_type_id, key): """Delete the given extra specs item.""" IMPL.instance_type_extra_specs_delete(context, instance_type_id, key) def instance_type_extra_specs_update_or_create(context, instance_type_id, extra_specs): """Create or update instance type extra specs. This adds or modifies the key/value pairs specified in the extra specs dict argument.""" IMPL.instance_type_extra_specs_update_or_create(context, instance_type_id, extra_specs) ################## def volume_metadata_get(context, volume_id): """Get all metadata for a volume.""" return IMPL.volume_metadata_get(context, volume_id) def volume_metadata_delete(context, volume_id, key): """Delete the given metadata item.""" IMPL.volume_metadata_delete(context, volume_id, key) def volume_metadata_update(context, volume_id, metadata, delete): """Update metadata if it exists, otherwise create it.""" IMPL.volume_metadata_update(context, volume_id, metadata, delete) ################## def volume_type_create(context, values): """Create a new volume type.""" return IMPL.volume_type_create(context, values) def volume_type_get_all(context, inactive=False): """Get all volume types.""" return IMPL.volume_type_get_all(context, inactive) def volume_type_get(context, id): """Get volume type by id.""" return IMPL.volume_type_get(context, id) def volume_type_get_by_name(context, name): """Get volume type by name.""" return IMPL.volume_type_get_by_name(context, name) def volume_type_destroy(context, name): """Delete a volume type.""" return IMPL.volume_type_destroy(context, name) #################### def volume_type_extra_specs_get(context, volume_type_id): """Get all extra specs for a volume type.""" return IMPL.volume_type_extra_specs_get(context, volume_type_id) def volume_type_extra_specs_delete(context, volume_type_id, key): """Delete the given extra specs item.""" IMPL.volume_type_extra_specs_delete(context, volume_type_id, key) def volume_type_extra_specs_update_or_create(context, volume_type_id, extra_specs): """Create or update volume type extra specs. 
This adds or modifies the key/value pairs specified in the extra specs dict argument.""" IMPL.volume_type_extra_specs_update_or_create(context, volume_type_id, extra_specs) ################### def s3_image_get(context, image_id): """Find local s3 image represented by the provided id""" return IMPL.s3_image_get(context, image_id) def s3_image_get_by_uuid(context, image_uuid): """Find local s3 image represented by the provided uuid""" return IMPL.s3_image_get_by_uuid(context, image_uuid) def s3_image_create(context, image_uuid): """Create local s3 image represented by provided uuid""" return IMPL.s3_image_create(context, image_uuid) #################### def sm_backend_conf_create(context, values): """Create a new SM Backend Config entry.""" return IMPL.sm_backend_conf_create(context, values) def sm_backend_conf_update(context, sm_backend_conf_id, values): """Update a SM Backend Config entry.""" return IMPL.sm_backend_conf_update(context, sm_backend_conf_id, values) def sm_backend_conf_delete(context, sm_backend_conf_id): """Delete a SM Backend Config.""" return IMPL.sm_backend_conf_delete(context, sm_backend_conf_id) def sm_backend_conf_get(context, sm_backend_conf_id): """Get a specific SM Backend Config.""" return IMPL.sm_backend_conf_get(context, sm_backend_conf_id) def sm_backend_conf_get_by_sr(context, sr_uuid): """Get a specific SM Backend Config by storage repository uuid.""" return IMPL.sm_backend_conf_get_by_sr(context, sr_uuid) def sm_backend_conf_get_all(context): """Get all SM Backend Configs.""" return IMPL.sm_backend_conf_get_all(context) #################### def sm_flavor_create(context, values): """Create a new SM Flavor entry.""" return IMPL.sm_flavor_create(context, values) def sm_flavor_update(context, sm_flavor_id, values): """Update a SM Flavor entry.""" return IMPL.sm_flavor_update(context, sm_flavor_id, values) def sm_flavor_delete(context, sm_flavor_id): """Delete a SM Flavor.""" return IMPL.sm_flavor_delete(context, sm_flavor_id) def sm_flavor_get(context, sm_flavor): """Get a specific SM Flavor.""" return IMPL.sm_flavor_get(context, sm_flavor) def sm_flavor_get_all(context): """Get all SM Flavors.""" return IMPL.sm_flavor_get_all(context) #################### def sm_volume_create(context, values): """Create a new SM volume entry.""" return IMPL.sm_volume_create(context, values) def sm_volume_update(context, volume_id, values): """Update an SM volume entry.""" return IMPL.sm_volume_update(context, volume_id, values) def sm_volume_delete(context, volume_id): """Delete an SM volume.""" return IMPL.sm_volume_delete(context, volume_id) def sm_volume_get(context, volume_id): """Get a specific SM volume.""" return IMPL.sm_volume_get(context, volume_id) def sm_volume_get_all(context): """Get all SM volumes.""" return IMPL.sm_volume_get_all(context) #################### def aggregate_create(context, values, metadata=None): """Create a new aggregate with metadata.""" return IMPL.aggregate_create(context, values, metadata) def aggregate_get(context, aggregate_id): """Get a specific aggregate by id.""" return IMPL.aggregate_get(context, aggregate_id) def aggregate_get_by_host(context, host): """Get a specific aggregate by host.""" return IMPL.aggregate_get_by_host(context, host) def aggregate_update(context, aggregate_id, values): """Update the attributes of an aggregate. 
If values contains a metadata key, it updates the aggregate metadata too.""" return IMPL.aggregate_update(context, aggregate_id, values) def aggregate_delete(context, aggregate_id): """Delete an aggregate.""" return IMPL.aggregate_delete(context, aggregate_id) def aggregate_get_all(context): """Get all aggregates.""" return IMPL.aggregate_get_all(context) def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False): """Add/update metadata. If set_delete=True, it adds only.""" IMPL.aggregate_metadata_add(context, aggregate_id, metadata, set_delete) def aggregate_metadata_get(context, aggregate_id): """Get metadata for the specified aggregate.""" return IMPL.aggregate_metadata_get(context, aggregate_id) def aggregate_metadata_delete(context, aggregate_id, key): """Delete the given metadata key.""" IMPL.aggregate_metadata_delete(context, aggregate_id, key) def aggregate_host_add(context, aggregate_id, host): """Add host to the aggregate.""" IMPL.aggregate_host_add(context, aggregate_id, host) def aggregate_host_get_all(context, aggregate_id): """Get hosts for the specified aggregate.""" return IMPL.aggregate_host_get_all(context, aggregate_id) def aggregate_host_delete(context, aggregate_id, host): """Delete the given host from the aggregate.""" IMPL.aggregate_host_delete(context, aggregate_id, host) #################### def instance_fault_create(context, values): """Create a new Instance Fault.""" return IMPL.instance_fault_create(context, values) def instance_fault_get_by_instance_uuids(context, instance_uuids): """Get all instance faults for the provided instance_uuids.""" return IMPL.instance_fault_get_by_instance_uuids(context, instance_uuids)
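# --- Hedged usage sketch (illustrative; the instance id and host value are
# hypothetical). Every function in this module is a thin dispatcher that
# forwards to the backend bound to IMPL, so callers import this module (or
# nova.db) rather than a concrete driver. Assuming the standard nova layout
# of this era, a minimal caller could look like this:
def _example_instance_update_usage():
    # Illustrative only; never called by the module itself.
    from nova import context
    ctxt = context.get_admin_context()
    # Raises NotFound if no instance with this id exists (see
    # instance_update above).
    return instance_update(ctxt, 'my-instance-id', {'host': 'compute-1'})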
from collections import OrderedDict import sys import warnings from django.core.exceptions import SuspiciousOperation, ImproperlyConfigured from django.core.paginator import InvalidPage from django.core.urlresolvers import reverse from django.db import models from django.db.models.fields import FieldDoesNotExist from django.utils import six from django.utils.deprecation import RenameMethodsBase, RemovedInDjango18Warning from django.utils.encoding import force_text from django.utils.translation import ugettext, ugettext_lazy from django.utils.http import urlencode from django.contrib.admin import FieldListFilter from django.contrib.admin.exceptions import ( DisallowedModelAdminLookup, DisallowedModelAdminToField, ) from django.contrib.admin.options import IncorrectLookupParameters, IS_POPUP_VAR, TO_FIELD_VAR from django.contrib.admin.utils import (quote, get_fields_from_path, lookup_needs_distinct, prepare_lookup_value) # Changelist settings ALL_VAR = 'all' ORDER_VAR = 'o' ORDER_TYPE_VAR = 'ot' PAGE_VAR = 'p' SEARCH_VAR = 'q' ERROR_FLAG = 'e' IGNORED_PARAMS = ( ALL_VAR, ORDER_VAR, ORDER_TYPE_VAR, SEARCH_VAR, IS_POPUP_VAR, TO_FIELD_VAR) # Text to display within change-list table cells if the value is blank. EMPTY_CHANGELIST_VALUE = ugettext_lazy('(None)') def _is_changelist_popup(request): """ Returns True if the popup GET parameter is set. This function is introduced to facilitate deprecating the legacy value for IS_POPUP_VAR and should be removed at the end of the deprecation cycle. """ if IS_POPUP_VAR in request.GET: return True IS_LEGACY_POPUP_VAR = 'pop' if IS_LEGACY_POPUP_VAR in request.GET: warnings.warn( "The `%s` GET parameter has been renamed to `%s`." % (IS_LEGACY_POPUP_VAR, IS_POPUP_VAR), RemovedInDjango18Warning, 2) return True return False class RenameChangeListMethods(RenameMethodsBase): renamed_methods = ( ('get_query_set', 'get_queryset', RemovedInDjango18Warning), ) class ChangeList(six.with_metaclass(RenameChangeListMethods)): def __init__(self, request, model, list_display, list_display_links, list_filter, date_hierarchy, search_fields, list_select_related, list_per_page, list_max_show_all, list_editable, model_admin): self.model = model self.opts = model._meta self.lookup_opts = self.opts self.root_queryset = model_admin.get_queryset(request) self.list_display = list_display self.list_display_links = list_display_links self.list_filter = list_filter self.date_hierarchy = date_hierarchy self.search_fields = search_fields self.list_select_related = list_select_related self.list_per_page = list_per_page self.list_max_show_all = list_max_show_all self.model_admin = model_admin self.preserved_filters = model_admin.get_preserved_filters(request) # Get search parameters from the query string. try: self.page_num = int(request.GET.get(PAGE_VAR, 0)) except ValueError: self.page_num = 0 self.show_all = ALL_VAR in request.GET self.is_popup = _is_changelist_popup(request) to_field = request.GET.get(TO_FIELD_VAR) if to_field and not model_admin.to_field_allowed(request, to_field): raise DisallowedModelAdminToField("The field %s cannot be referenced." 
% to_field) self.to_field = to_field self.params = dict(request.GET.items()) if PAGE_VAR in self.params: del self.params[PAGE_VAR] if ERROR_FLAG in self.params: del self.params[ERROR_FLAG] if self.is_popup: self.list_editable = () else: self.list_editable = list_editable self.query = request.GET.get(SEARCH_VAR, '') self.queryset = self.get_queryset(request) self.get_results(request) if self.is_popup: title = ugettext('Select %s') else: title = ugettext('Select %s to change') self.title = title % force_text(self.opts.verbose_name) self.pk_attname = self.lookup_opts.pk.attname @property def root_query_set(self): warnings.warn("`ChangeList.root_query_set` is deprecated, " "use `root_queryset` instead.", RemovedInDjango18Warning, 2) return self.root_queryset @property def query_set(self): warnings.warn("`ChangeList.query_set` is deprecated, " "use `queryset` instead.", RemovedInDjango18Warning, 2) return self.queryset def get_filters_params(self, params=None): """ Returns all params except IGNORED_PARAMS """ if not params: params = self.params lookup_params = params.copy() # a dictionary of the query string # Remove all the parameters that are globally and systematically # ignored. for ignored in IGNORED_PARAMS: if ignored in lookup_params: del lookup_params[ignored] return lookup_params def get_filters(self, request): lookup_params = self.get_filters_params() use_distinct = False for key, value in lookup_params.items(): if not self.model_admin.lookup_allowed(key, value): raise DisallowedModelAdminLookup("Filtering by %s not allowed" % key) filter_specs = [] if self.list_filter: for list_filter in self.list_filter: if callable(list_filter): # This is simply a custom list filter class. spec = list_filter(request, lookup_params, self.model, self.model_admin) else: field_path = None if isinstance(list_filter, (tuple, list)): # This is a custom FieldListFilter class for a given field. field, field_list_filter_class = list_filter else: # This is simply a field name, so use the default # FieldListFilter class that has been registered for # the type of the given field. field, field_list_filter_class = list_filter, FieldListFilter.create if not isinstance(field, models.Field): field_path = field field = get_fields_from_path(self.model, field_path)[-1] spec = field_list_filter_class(field, request, lookup_params, self.model, self.model_admin, field_path=field_path) # Check if we need to use distinct() use_distinct = (use_distinct or lookup_needs_distinct(self.lookup_opts, field_path)) if spec and spec.has_output(): filter_specs.append(spec) # At this point, all the parameters used by the various ListFilters # have been removed from lookup_params, which now only contains other # parameters passed via the query string. We now loop through the # remaining parameters both to ensure that all the parameters are valid # fields and to determine if at least one of them needs distinct(). If # the lookup parameters aren't real fields, then bail out. 
try: for key, value in lookup_params.items(): lookup_params[key] = prepare_lookup_value(key, value) use_distinct = (use_distinct or lookup_needs_distinct(self.lookup_opts, key)) return filter_specs, bool(filter_specs), lookup_params, use_distinct except FieldDoesNotExist as e: six.reraise(IncorrectLookupParameters, IncorrectLookupParameters(e), sys.exc_info()[2]) def get_query_string(self, new_params=None, remove=None): if new_params is None: new_params = {} if remove is None: remove = [] p = self.params.copy() for r in remove: for k in list(p): if k.startswith(r): del p[k] for k, v in new_params.items(): if v is None: if k in p: del p[k] else: p[k] = v return '?%s' % urlencode(sorted(p.items())) def get_results(self, request): paginator = self.model_admin.get_paginator(request, self.queryset, self.list_per_page) # Get the number of objects, with admin filters applied. result_count = paginator.count # Get the total number of objects, with no admin filters applied. # Perform a slight optimization: # full_result_count is equal to paginator.count if no filters # were applied if self.get_filters_params() or self.params.get(SEARCH_VAR): full_result_count = self.root_queryset.count() else: full_result_count = result_count can_show_all = result_count <= self.list_max_show_all multi_page = result_count > self.list_per_page # Get the list of objects to display on this page. if (self.show_all and can_show_all) or not multi_page: result_list = self.queryset._clone() else: try: result_list = paginator.page(self.page_num + 1).object_list except InvalidPage: raise IncorrectLookupParameters self.result_count = result_count self.full_result_count = full_result_count self.result_list = result_list self.can_show_all = can_show_all self.multi_page = multi_page self.paginator = paginator def _get_default_ordering(self): ordering = [] if self.model_admin.ordering: ordering = self.model_admin.ordering elif self.lookup_opts.ordering: ordering = self.lookup_opts.ordering return ordering def get_ordering_field(self, field_name): """ Returns the proper model field name corresponding to the given field_name to use for ordering. field_name may either be the name of a proper model field or the name of a method (on the admin or model) or a callable with the 'admin_order_field' attribute. Returns None if no proper model field name can be matched. """ try: field = self.lookup_opts.get_field(field_name) return field.name except models.FieldDoesNotExist: # See whether field_name is a name of a non-field # that allows sorting. if callable(field_name): attr = field_name elif hasattr(self.model_admin, field_name): attr = getattr(self.model_admin, field_name) else: attr = getattr(self.model, field_name) return getattr(attr, 'admin_order_field', None) def get_ordering(self, request, queryset): """ Returns the list of ordering fields for the change list. First we check the get_ordering() method in model admin, then we check the object's default ordering. Then, any manually-specified ordering from the query string overrides anything. Finally, a deterministic order is guaranteed by ensuring the primary key is used as the last ordering field. 
""" params = self.params ordering = list(self.model_admin.get_ordering(request) or self._get_default_ordering()) if ORDER_VAR in params: # Clear ordering and used params ordering = [] order_params = params[ORDER_VAR].split('.') for p in order_params: try: none, pfx, idx = p.rpartition('-') field_name = self.list_display[int(idx)] order_field = self.get_ordering_field(field_name) if not order_field: continue # No 'admin_order_field', skip it # reverse order if order_field has already "-" as prefix if order_field.startswith('-') and pfx == "-": ordering.append(order_field[1:]) else: ordering.append(pfx + order_field) except (IndexError, ValueError): continue # Invalid ordering specified, skip it. # Add the given query's ordering fields, if any. ordering.extend(queryset.query.order_by) # Ensure that the primary key is systematically present in the list of # ordering fields so we can guarantee a deterministic order across all # database backends. pk_name = self.lookup_opts.pk.name if not (set(ordering) & set(['pk', '-pk', pk_name, '-' + pk_name])): # The two sets do not intersect, meaning the pk isn't present. So # we add it. ordering.append('-pk') return ordering def get_ordering_field_columns(self): """ Returns an OrderedDict of ordering field column numbers and asc/desc """ # We must cope with more than one column having the same underlying sort # field, so we base things on column numbers. ordering = self._get_default_ordering() ordering_fields = OrderedDict() if ORDER_VAR not in self.params: # for ordering specified on ModelAdmin or model Meta, we don't know # the right column numbers absolutely, because there might be more # than one column associated with that ordering, so we guess. for field in ordering: if field.startswith('-'): field = field[1:] order_type = 'desc' else: order_type = 'asc' for index, attr in enumerate(self.list_display): if self.get_ordering_field(attr) == field: ordering_fields[index] = order_type break else: for p in self.params[ORDER_VAR].split('.'): none, pfx, idx = p.rpartition('-') try: idx = int(idx) except ValueError: continue # skip it ordering_fields[idx] = 'desc' if pfx == '-' else 'asc' return ordering_fields def get_queryset(self, request): # First, we collect all the declared list filters. (self.filter_specs, self.has_filters, remaining_lookup_params, filters_use_distinct) = self.get_filters(request) # Then, we let every list filter modify the queryset to its liking. qs = self.root_queryset for filter_spec in self.filter_specs: new_qs = filter_spec.queryset(request, qs) if new_qs is not None: qs = new_qs try: # Finally, we apply the remaining lookup parameters from the query # string (i.e. those that haven't already been processed by the # filters). qs = qs.filter(**remaining_lookup_params) except (SuspiciousOperation, ImproperlyConfigured): # Allow certain types of errors to be re-raised as-is so that the # caller can treat them in a special way. raise except Exception as e: # Every other error is caught with a naked except, because we don't # have any other way of validating lookup parameters. They might be # invalid if the keyword arguments are incorrect, or if the values # are not in the correct type, so we might get FieldError, # ValueError, ValidationError, or ?. raise IncorrectLookupParameters(e) if not qs.query.select_related: qs = self.apply_select_related(qs) # Set ordering. 
ordering = self.get_ordering(request, qs) qs = qs.order_by(*ordering) # Apply search results qs, search_use_distinct = self.model_admin.get_search_results( request, qs, self.query) # Remove duplicates from results, if necessary if filters_use_distinct | search_use_distinct: return qs.distinct() else: return qs def apply_select_related(self, qs): if self.list_select_related is True: return qs.select_related() if self.list_select_related is False: if self.has_related_field_in_list_display(): return qs.select_related() if self.list_select_related: return qs.select_related(*self.list_select_related) return qs def has_related_field_in_list_display(self): for field_name in self.list_display: try: field = self.lookup_opts.get_field(field_name) except models.FieldDoesNotExist: pass else: if isinstance(field.rel, models.ManyToOneRel): return True return False def url_for_result(self, result): pk = getattr(result, self.pk_attname) return reverse('admin:%s_%s_change' % (self.opts.app_label, self.opts.model_name), args=(quote(pk),), current_app=self.model_admin.admin_site.name)
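# --- Hedged sketch (illustrative; not part of Django, and the parameter
# values are hypothetical). get_query_string() above merges new_params over
# the current changelist params, drops any key whose name starts with an
# entry in `remove`, and urlencodes the sorted result so generated links are
# deterministic. A simplified re-statement of that contract:
def _example_get_query_string():
    # Illustrative only; never called by the module itself.
    from django.utils.http import urlencode
    p = {'o': '1', 'q': 'foo'}           # current changelist params
    remove, new_params = ['q'], {'p': 2}
    for r in remove:
        for k in list(p):
            if k.startswith(r):
                del p[k]                 # strip the search term
    p.update(new_params)                 # add the page number
    return '?%s' % urlencode(sorted(p.items()))  # '?o=1&p=2'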
# Copyright 2012 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import datetime import json import re import urllib import webob.exc from glance.api import policy import glance.api.v2 as v2 from glance.common import exception from glance.common import utils from glance.common import wsgi import glance.db import glance.notifier from glance.openstack.common import cfg import glance.openstack.common.log as logging from glance.openstack.common import timeutils import glance.schema import glance.store LOG = logging.getLogger(__name__) CONF = cfg.CONF class ImagesController(object): def __init__(self, db_api=None, policy_enforcer=None, notifier=None, store_api=None): self.db_api = db_api or glance.db.get_api() self.db_api.configure_db() self.policy = policy_enforcer or policy.Enforcer() self.notifier = notifier or glance.notifier.Notifier() self.store_api = store_api or glance.store self.store_api.create_stores() def _enforce(self, req, action): """Authorize an action against our policies""" try: self.policy.enforce(req.context, action, {}) except exception.Forbidden: raise webob.exc.HTTPForbidden() def _normalize_properties(self, image): """Convert the properties from the stored format to a dict The db api returns a list of dicts that look like {'name': <key>, 'value': <value>}, while it expects a format like {<key>: <value>} in image create and update calls. This function takes the extra step that the db api should be responsible for in the image get calls. The db api will also return deleted image properties that must be filtered out. 
""" properties = [(p['name'], p['value']) for p in image['properties'] if not p['deleted']] image['properties'] = dict(properties) return image def _extract_tags(self, image): try: #NOTE(bcwaldon): cast to set to make the list unique, then # cast back to list since that's a more sane response type return list(set(image.pop('tags'))) except KeyError: pass def _append_tags(self, context, image): image['tags'] = self.db_api.image_tag_get_all(context, image['id']) return image @utils.mutating def create(self, req, image): self._enforce(req, 'add_image') is_public = image.get('is_public') if is_public: self._enforce(req, 'publicize_image') image['owner'] = req.context.owner image['status'] = 'queued' tags = self._extract_tags(image) image = dict(self.db_api.image_create(req.context, image)) if tags is not None: self.db_api.image_tag_set_all(req.context, image['id'], tags) image['tags'] = tags else: image['tags'] = [] image = self._normalize_properties(dict(image)) self.notifier.info('image.update', image) return image def index(self, req, marker=None, limit=None, sort_key='created_at', sort_dir='desc', filters={}): self._enforce(req, 'get_images') filters['deleted'] = False result = {} if limit is None: limit = CONF.limit_param_default limit = min(CONF.api_limit_max, limit) try: images = self.db_api.image_get_all(req.context, filters=filters, marker=marker, limit=limit, sort_key=sort_key, sort_dir=sort_dir) if len(images) != 0 and len(images) == limit: result['next_marker'] = images[-1]['id'] except exception.InvalidFilterRangeValue as e: raise webob.exc.HTTPBadRequest(explanation=unicode(e)) except exception.InvalidSortKey as e: raise webob.exc.HTTPBadRequest(explanation=unicode(e)) except exception.NotFound as e: raise webob.exc.HTTPBadRequest(explanation=unicode(e)) images = [self._normalize_properties(dict(image)) for image in images] result['images'] = [self._append_tags(req.context, image) for image in images] return result def _get_image(self, context, image_id): try: image = self.db_api.image_get(context, image_id) if image['deleted']: raise exception.NotFound() except (exception.NotFound, exception.Forbidden): raise webob.exc.HTTPNotFound() else: return dict(image) def show(self, req, image_id): self._enforce(req, 'get_image') image = self._get_image(req.context, image_id) image = self._normalize_properties(image) return self._append_tags(req.context, image) @utils.mutating def update(self, req, image_id, changes): self._enforce(req, 'modify_image') context = req.context image = self._get_image(context, image_id) image = self._normalize_properties(image) updates = self._extract_updates(req, image, changes) tags = None if len(updates) > 0: tags = self._extract_tags(updates) purge_props = 'properties' in updates try: image = self.db_api.image_update(context, image_id, updates, purge_props) except (exception.NotFound, exception.Forbidden): raise webob.exc.HTTPNotFound() image = self._normalize_properties(dict(image)) if tags is not None: self.db_api.image_tag_set_all(req.context, image_id, tags) image['tags'] = tags else: self._append_tags(req.context, image) self.notifier.info('image.update', image) return image def _extract_updates(self, req, image, changes): """ Determine the updates to pass to the database api. Given the current image, convert a list of changes to be made into the corresponding update dictionary that should be passed to db_api.image_update. Changes have the following parts op - 'add' a new attribute, 'replace' an existing attribute, or 'remove' an existing attribute. 
path - A list of path parts for determining which attribute the operation applies to. value - For 'add' and 'replace', the new value the attribute should assume. For the current use case, there are two types of valid paths. For base attributes (fields stored directly on the Image object) the path must take the form ['<attribute name>']. These attributes are always present so the only valid operation on them is 'replace'. For image properties, the path takes the form ['properties', '<property name>'] and all operations are valid. Future refactoring should simplify this code by hardening the image abstraction such that database details such as how image properties are stored do not have any influence here. """ updates = {} property_updates = image['properties'] for change in changes: path = change['path'] if len(path) == 1: assert change['op'] == 'replace' key = change['path'][0] if key == 'is_public' and change['value']: self._enforce(req, 'publicize_image') updates[key] = change['value'] else: assert len(path) == 2 assert path[0] == 'properties' update_method_name = '_do_%s_property' % change['op'] assert hasattr(self, update_method_name) update_method = getattr(self, update_method_name) update_method(property_updates, change) updates['properties'] = property_updates return updates def _do_replace_property(self, updates, change): """ Replace a single image property, ensuring it's present. """ key = change['path'][1] if key not in updates: msg = _("Property %s does not exist.") raise webob.exc.HTTPConflict(msg % key) updates[key] = change['value'] def _do_add_property(self, updates, change): """ Add a new image property, ensuring it does not already exist. """ key = change['path'][1] if key in updates: msg = _("Property %s already present.") raise webob.exc.HTTPConflict(msg % key) updates[key] = change['value'] def _do_remove_property(self, updates, change): """ Remove an image property, ensuring it's present. 
""" key = change['path'][1] if key not in updates: msg = _("Property %s does not exist.") raise webob.exc.HTTPConflict(msg % key) del updates[key] @utils.mutating def delete(self, req, image_id): self._enforce(req, 'delete_image') image = self._get_image(req.context, image_id) if image['protected']: msg = _("Unable to delete as image %(image_id)s is protected" % locals()) raise webob.exc.HTTPForbidden(explanation=msg) if image['location'] and CONF.delayed_delete: status = 'pending_delete' else: status = 'deleted' try: self.db_api.image_update(req.context, image_id, {'status': status}) self.db_api.image_destroy(req.context, image_id) if image['location']: if CONF.delayed_delete: self.store_api.schedule_delayed_delete_from_backend( image['location'], id) else: self.store_api.safe_delete_from_backend(image['location'], req.context, id) except (exception.NotFound, exception.Forbidden): msg = ("Failed to find image %(image_id)s to delete" % locals()) LOG.info(msg) raise webob.exc.HTTPNotFound() else: self.notifier.info('image.delete', image) class RequestDeserializer(wsgi.JSONRequestDeserializer): _readonly_properties = ['created_at', 'updated_at', 'status', 'checksum', 'size', 'direct_url', 'self', 'file', 'schema'] _reserved_properties = ['owner', 'is_public', 'location', 'deleted', 'deleted_at'] _base_properties = ['checksum', 'created_at', 'container_format', 'disk_format', 'id', 'min_disk', 'min_ram', 'name', 'size', 'status', 'tags', 'updated_at', 'visibility', 'protected'] def __init__(self, schema=None): super(RequestDeserializer, self).__init__() self.schema = schema or get_schema() def _parse_image(self, request): body = self._get_request_body(request) try: self.schema.validate(body) except exception.InvalidObject as e: raise webob.exc.HTTPBadRequest(explanation=unicode(e)) # Ensure all specified properties are allowed self._check_readonly(body) self._check_reserved(body) # Create a dict of base image properties, with user- and deployer- # defined properties contained in a 'properties' dictionary image = {'properties': body} for key in self._base_properties: try: image[key] = image['properties'].pop(key) except KeyError: pass if 'visibility' in image: image['is_public'] = image.pop('visibility') == 'public' return {'image': image} def _get_request_body(self, request): output = super(RequestDeserializer, self).default(request) if not 'body' in output: msg = _('Body expected in request.') raise webob.exc.HTTPBadRequest(explanation=msg) return output['body'] @classmethod def _check_readonly(cls, image): for key in cls._readonly_properties: if key in image: msg = "Attribute \'%s\' is read-only." % key raise webob.exc.HTTPForbidden(explanation=unicode(msg)) @classmethod def _check_reserved(cls, image): for key in cls._reserved_properties: if key in image: msg = "Attribute \'%s\' is reserved." 
% key raise webob.exc.HTTPForbidden(explanation=unicode(msg)) def create(self, request): return self._parse_image(request) def _get_change_operation(self, raw_change): op = None for key in ['replace', 'add', 'remove']: if key in raw_change: if op is not None: msg = _('Operation objects must contain only one member' ' named "add", "remove", or "replace".') raise webob.exc.HTTPBadRequest(explanation=msg) op = key if op is None: msg = _('Operation objects must contain exactly one member' ' named "add", "remove", or "replace".') raise webob.exc.HTTPBadRequest(explanation=msg) return op def _get_change_path(self, raw_change, op): key = self._decode_json_pointer(raw_change[op]) if key in self._readonly_properties: msg = "Attribute \'%s\' is read-only." % key raise webob.exc.HTTPForbidden(explanation=unicode(msg)) if key in self._reserved_properties: msg = "Attribute \'%s\' is reserved." % key raise webob.exc.HTTPForbidden(explanation=unicode(msg)) # For image properties, we need to put "properties" at the beginning if key not in self._base_properties: return ['properties', key] return [key] def _decode_json_pointer(self, pointer): """ Parse a json pointer. Json Pointers are defined in http://tools.ietf.org/html/draft-pbryan-zyp-json-pointer . The pointers use '/' for separation between object attributes, such that '/A/B' would evaluate to C in {"A": {"B": "C"}}. A '/' character in an attribute name is encoded as "~1" and a '~' character is encoded as "~0". """ self._validate_json_pointer(pointer) return pointer.lstrip('/').replace('~1', '/').replace('~0', '~') def _validate_json_pointer(self, pointer): """ Validate a json pointer. We only accept a limited form of json pointers. Specifically, we do not allow multiple levels of indirection, so there can only be one '/' in the pointer, located at the start of the string. """ if not pointer.startswith('/'): msg = _('Pointer `%s` does not start with "/".' % pointer) raise webob.exc.HTTPBadRequest(explanation=msg) if '/' in pointer[1:]: msg = _('Pointer `%s` contains more than one "/".' % pointer) raise webob.exc.HTTPBadRequest(explanation=msg) if re.match('~[^01]', pointer): msg = _('Pointer `%s` contains "~" not part of' ' a recognized escape sequence.' 
% pointer) raise webob.exc.HTTPBadRequest(explanation=msg) def _get_change_value(self, raw_change, op): if 'value' not in raw_change: msg = _('Operation "%s" requires a member named "value".') raise webob.exc.HTTPBadRequest(explanation=msg % op) return raw_change['value'] def _validate_change(self, change): if change['op'] == 'remove': return partial_image = {change['path'][-1]: change['value']} try: self.schema.validate(partial_image) except exception.InvalidObject as e: raise webob.exc.HTTPBadRequest(explanation=unicode(e)) def update(self, request): changes = [] valid_content_types = [ 'application/openstack-images-v2.0-json-patch' ] if request.content_type not in valid_content_types: headers = {'Accept-Patch': ','.join(valid_content_types)} raise webob.exc.HTTPUnsupportedMediaType(headers=headers) body = self._get_request_body(request) if not isinstance(body, list): msg = _('Request body must be a JSON array of operation objects.') raise webob.exc.HTTPBadRequest(explanation=msg) for raw_change in body: if not isinstance(raw_change, dict): msg = _('Operations must be JSON objects.') raise webob.exc.HTTPBadRequest(explanation=msg) op = self._get_change_operation(raw_change) path = self._get_change_path(raw_change, op) change = {'op': op, 'path': path} if not op == 'remove': change['value'] = self._get_change_value(raw_change, op) self._validate_change(change) if change['path'] == ['visibility']: change['path'] = ['is_public'] change['value'] = change['value'] == 'public' changes.append(change) return {'changes': changes} def _validate_limit(self, limit): try: limit = int(limit) except ValueError: msg = _("limit param must be an integer") raise webob.exc.HTTPBadRequest(explanation=msg) if limit < 0: msg = _("limit param must be positive") raise webob.exc.HTTPBadRequest(explanation=msg) return limit def _validate_sort_dir(self, sort_dir): if sort_dir not in ['asc', 'desc']: msg = _('Invalid sort direction: %s' % sort_dir) raise webob.exc.HTTPBadRequest(explanation=msg) return sort_dir def _get_filters(self, filters): visibility = filters.pop('visibility', None) if visibility: if visibility in ['public', 'private']: filters['is_public'] = visibility == 'public' else: msg = _('Invalid visibility value: %s') % visibility raise webob.exc.HTTPBadRequest(explanation=msg) return filters def index(self, request): params = request.params.copy() limit = params.pop('limit', None) marker = params.pop('marker', None) sort_dir = params.pop('sort_dir', 'desc') query_params = { 'sort_key': params.pop('sort_key', 'created_at'), 'sort_dir': self._validate_sort_dir(sort_dir), 'filters': self._get_filters(params), } if marker is not None: query_params['marker'] = marker if limit is not None: query_params['limit'] = self._validate_limit(limit) return query_params class ResponseSerializer(wsgi.JSONResponseSerializer): def __init__(self, schema=None): super(ResponseSerializer, self).__init__() self.schema = schema or get_schema() def _get_image_href(self, image, subcollection=''): base_href = '/v2/images/%s' % image['id'] if subcollection: base_href = '%s/%s' % (base_href, subcollection) return base_href def _get_image_links(self, image): return [ {'rel': 'self', 'href': self._get_image_href(image)}, {'rel': 'file', 'href': self._get_image_href(image, 'file')}, {'rel': 'describedby', 'href': '/v2/schemas/image'}, ] def _format_image(self, image): #NOTE(bcwaldon): merge the contained properties dict with the # top-level image object image_view = image['properties'] attributes = ['id', 'name', 'disk_format', 
'container_format', 'size', 'status', 'checksum', 'tags', 'protected', 'created_at', 'updated_at', 'min_ram', 'min_disk'] for key in attributes: image_view[key] = image[key] location = image['location'] if CONF.show_image_direct_url and location is not None: image_view['direct_url'] = location visibility = 'public' if image['is_public'] else 'private' image_view['visibility'] = visibility image_view['self'] = self._get_image_href(image) image_view['file'] = self._get_image_href(image, 'file') image_view['schema'] = '/v2/schemas/image' self._serialize_datetimes(image_view) image_view = self.schema.filter(image_view) return image_view @staticmethod def _serialize_datetimes(image): for (key, value) in image.iteritems(): if isinstance(value, datetime.datetime): image[key] = timeutils.isotime(value) def create(self, response, image): response.status_int = 201 body = json.dumps(self._format_image(image), ensure_ascii=False) response.unicode_body = unicode(body) response.content_type = 'application/json' response.location = self._get_image_href(image) def show(self, response, image): body = json.dumps(self._format_image(image), ensure_ascii=False) response.unicode_body = unicode(body) response.content_type = 'application/json' def update(self, response, image): body = json.dumps(self._format_image(image), ensure_ascii=False) response.unicode_body = unicode(body) response.content_type = 'application/json' def index(self, response, result): params = dict(response.request.params) params.pop('marker', None) query = urllib.urlencode(params) body = { 'images': [self._format_image(i) for i in result['images']], 'first': '/v2/images', 'schema': '/v2/schemas/images', } if query: body['first'] = '%s?%s' % (body['first'], query) if 'next_marker' in result: params['marker'] = result['next_marker'] next_query = urllib.urlencode(params) body['next'] = '/v2/images?%s' % next_query response.unicode_body = unicode(json.dumps(body, ensure_ascii=False)) response.content_type = 'application/json' def delete(self, response, result): response.status_int = 204 _BASE_PROPERTIES = { 'id': { 'type': 'string', 'description': 'An identifier for the image', 'pattern': ('^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}' '-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$'), }, 'name': { 'type': 'string', 'description': 'Descriptive name for the image', 'maxLength': 255, }, 'status': { 'type': 'string', 'description': 'Status of the image', 'enum': ['queued', 'saving', 'active', 'killed', 'deleted', 'pending_delete'], }, 'visibility': { 'type': 'string', 'description': 'Scope of image accessibility', 'enum': ['public', 'private'], }, 'protected': { 'type': 'boolean', 'description': 'If true, image will not be deletable.', }, 'checksum': { 'type': 'string', 'description': 'md5 hash of image contents.', 'maxLength': 32, }, 'size': { 'type': 'integer', 'description': 'Size of image file in bytes', }, 'container_format': { 'type': 'string', 'description': '', 'enum': ['bare', 'ovf', 'ami', 'aki', 'ari'], }, 'disk_format': { 'type': 'string', 'description': '', 'enum': ['raw', 'vhd', 'vmdk', 'vdi', 'iso', 'qcow2', 'aki', 'ari', 'ami'], }, 'created_at': { 'type': 'string', 'description': 'Date and time of image registration', #TODO(bcwaldon): our jsonschema library doesn't seem to like the # format attribute, figure out why! 
#'format': 'date-time', }, 'updated_at': { 'type': 'string', 'description': 'Date and time of the last image modification', #'format': 'date-time', }, 'tags': { 'type': 'array', 'description': 'List of strings related to the image', 'items': { 'type': 'string', 'maxLength': 255, }, }, 'direct_url': { 'type': 'string', 'description': 'URL to access the image file kept in external store', }, 'min_ram': { 'type': 'integer', 'description': 'Amount of ram (in MB) required to boot image.', }, 'min_disk': { 'type': 'integer', 'description': 'Amount of disk space (in GB) required to boot image.', }, 'self': {'type': 'string'}, 'file': {'type': 'string'}, 'schema': {'type': 'string'}, } _BASE_LINKS = [ {'rel': 'self', 'href': '{self}'}, {'rel': 'enclosure', 'href': '{file}'}, {'rel': 'describedby', 'href': '{schema}'}, ] def get_schema(custom_properties=None): properties = copy.deepcopy(_BASE_PROPERTIES) links = copy.deepcopy(_BASE_LINKS) if CONF.allow_additional_image_properties: schema = glance.schema.PermissiveSchema('image', properties, links) else: schema = glance.schema.Schema('image', properties) schema.merge_properties(custom_properties or {}) return schema def get_collection_schema(custom_properties=None): image_schema = get_schema(custom_properties) return glance.schema.CollectionSchema('images', image_schema) def load_custom_properties(): """Find the schema properties files and load them into a dict.""" filename = 'schema-image.json' match = CONF.find_file(filename) if match: schema_file = open(match) schema_data = schema_file.read() return json.loads(schema_data) else: msg = _('Could not find schema properties file %s. Continuing ' 'without custom properties') LOG.warn(msg % filename) return {} def create_resource(custom_properties=None): """Images resource factory method""" schema = get_schema(custom_properties) deserializer = RequestDeserializer(schema) serializer = ResponseSerializer(schema) controller = ImagesController() return wsgi.Resource(controller, deserializer, serializer)
./CrossVul/dataset_final_sorted/CWE-264/py/good_3784_0
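For readers tracing the JSON-patch handling in the Glance v2 images controller above, here is a minimal, self-contained sketch of the visibility translation that update() performs on each operation. The helper name translate_patch_op is illustrative and not part of the Glance source; the mapping itself (public-facing 'visibility' string to the internal boolean 'is_public' flag) is taken directly from the code above.

def translate_patch_op(change):
    # The v2 API exposes image visibility as the string 'public'/'private',
    # while the underlying model stores a boolean 'is_public' flag.
    if change['path'] == ['visibility']:
        change = dict(change,
                      path=['is_public'],
                      value=(change['value'] == 'public'))
    return change

# A "replace /visibility public" patch operation becomes an is_public update:
print(translate_patch_op(
    {'op': 'replace', 'path': ['visibility'], 'value': 'public'}))
# -> {'op': 'replace', 'path': ['is_public'], 'value': True}

Note that ordering matters in the original controller: the translation runs after _validate_change(), so the schema validates the public attribute name rather than the internal column name.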
crossvul-python_data_bad_3693_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Main entry point into the Identity service.""" import uuid import urllib import urlparse from keystone import config from keystone import exception from keystone import policy from keystone import token from keystone.common import manager from keystone.common import wsgi CONF = config.CONF class Manager(manager.Manager): """Default pivot point for the Identity backend. See :mod:`keystone.common.manager.Manager` for more details on how this dynamically calls the backend. """ def __init__(self): super(Manager, self).__init__(CONF.identity.driver) class Driver(object): """Interface description for an Identity driver.""" def authenticate(self, user_id=None, tenant_id=None, password=None): """Authenticate a given user, tenant and password. Returns: (user, tenant, metadata). """ raise exception.NotImplemented() def get_tenant(self, tenant_id): """Get a tenant by id. Returns: tenant_ref or None. """ raise exception.NotImplemented() def get_tenant_by_name(self, tenant_name): """Get a tenant by name. Returns: tenant_ref or None. """ raise exception.NotImplemented() def get_user(self, user_id): """Get a user by id. Returns: user_ref or None. """ raise exception.NotImplemented() def get_user_by_name(self, user_name): """Get a user by name. Returns: user_ref or None. """ raise exception.NotImplemented() def get_role(self, role_id): """Get a role by id. Returns: role_ref or None. """ raise exception.NotImplemented() def list_users(self): """List all users in the system. NOTE(termie): I'd prefer if this listed only the users for a given tenant. Returns: a list of user_refs or an empty list. """ raise exception.NotImplemented() def list_roles(self): """List all roles in the system. Returns: a list of role_refs or an empty list. """ raise exception.NotImplemented() # NOTE(termie): the seven calls below should probably be exposed by the api # more clearly when the api redesign happens def add_user_to_tenant(self, tenant_id, user_id): raise exception.NotImplemented() def remove_user_from_tenant(self, tenant_id, user_id): raise exception.NotImplemented() def get_all_tenants(self): raise exception.NotImplemented() def get_tenants_for_user(self, user_id): """Get the tenants associated with a given user. Returns: a list of tenant ids. """ raise exception.NotImplemented() def get_roles_for_user_and_tenant(self, user_id, tenant_id): """Get the roles associated with a user within a given tenant. Returns: a list of role ids.
""" raise exception.NotImplemented() def add_role_to_user_and_tenant(self, user_id, tenant_id, role_id): """Add a role to a user within given tenant.""" raise exception.NotImplemented() def remove_role_from_user_and_tenant(self, user_id, tenant_id, role_id): """Remove a role from a user within given tenant.""" raise exception.NotImplemented() # user crud def create_user(self, user_id, user): raise exception.NotImplemented() def update_user(self, user_id, user): raise exception.NotImplemented() def delete_user(self, user_id): raise exception.NotImplemented() # tenant crud def create_tenant(self, tenant_id, tenant): raise exception.NotImplemented() def update_tenant(self, tenant_id, tenant): raise exception.NotImplemented() def delete_tenant(self, tenant_id, tenant): raise exception.NotImplemented() # metadata crud def get_metadata(self, user_id, tenant_id): raise exception.NotImplemented() def create_metadata(self, user_id, tenant_id, metadata): raise exception.NotImplemented() def update_metadata(self, user_id, tenant_id, metadata): raise exception.NotImplemented() def delete_metadata(self, user_id, tenant_id, metadata): raise exception.NotImplemented() # role crud def create_role(self, role_id, role): raise exception.NotImplemented() def update_role(self, role_id, role): raise exception.NotImplemented() def delete_role(self, role_id): raise exception.NotImplemented() class PublicRouter(wsgi.ComposableRouter): def add_routes(self, mapper): tenant_controller = TenantController() mapper.connect('/tenants', controller=tenant_controller, action='get_tenants_for_token', conditions=dict(methods=['GET'])) class AdminRouter(wsgi.ComposableRouter): def add_routes(self, mapper): # Tenant Operations tenant_controller = TenantController() mapper.connect('/tenants', controller=tenant_controller, action='get_all_tenants', conditions=dict(method=['GET'])) mapper.connect('/tenants/{tenant_id}', controller=tenant_controller, action='get_tenant', conditions=dict(method=['GET'])) # User Operations user_controller = UserController() mapper.connect('/users/{user_id}', controller=user_controller, action='get_user', conditions=dict(method=['GET'])) # Role Operations roles_controller = RoleController() mapper.connect('/tenants/{tenant_id}/users/{user_id}/roles', controller=roles_controller, action='get_user_roles', conditions=dict(method=['GET'])) mapper.connect('/users/{user_id}/roles', controller=user_controller, action='get_user_roles', conditions=dict(method=['GET'])) class TenantController(wsgi.Application): def __init__(self): self.identity_api = Manager() self.policy_api = policy.Manager() self.token_api = token.Manager() super(TenantController, self).__init__() def get_all_tenants(self, context, **kw): """Gets a list of all tenants for an admin user.""" self.assert_admin(context) tenant_refs = self.identity_api.get_tenants(context) params = { 'limit': context['query_string'].get('limit'), 'marker': context['query_string'].get('marker'), } return self._format_tenant_list(tenant_refs, **params) def get_tenants_for_token(self, context, **kw): """Get valid tenants for token based on token used to authenticate. Pulls the token from the context, validates it and gets the valid tenants for the user in the token. Doesn't care about token scopedness. 
""" try: token_ref = self.token_api.get_token(context=context, token_id=context['token_id']) except exception.NotFound: raise exception.Unauthorized() user_ref = token_ref['user'] tenant_ids = self.identity_api.get_tenants_for_user( context, user_ref['id']) tenant_refs = [] for tenant_id in tenant_ids: tenant_refs.append(self.identity_api.get_tenant( context=context, tenant_id=tenant_id)) params = { 'limit': context['query_string'].get('limit'), 'marker': context['query_string'].get('marker'), } return self._format_tenant_list(tenant_refs, **params) def get_tenant(self, context, tenant_id): # TODO(termie): this stuff should probably be moved to middleware self.assert_admin(context) tenant = self.identity_api.get_tenant(context, tenant_id) if tenant is None: raise exception.TenantNotFound(tenant_id=tenant_id) return {'tenant': tenant} # CRUD Extension def create_tenant(self, context, tenant): tenant_ref = self._normalize_dict(tenant) self.assert_admin(context) tenant_id = (tenant_ref.get('id') and tenant_ref.get('id') or uuid.uuid4().hex) tenant_ref['id'] = tenant_id tenant = self.identity_api.create_tenant( context, tenant_id, tenant_ref) return {'tenant': tenant} def update_tenant(self, context, tenant_id, tenant): self.assert_admin(context) if self.identity_api.get_tenant(context, tenant_id) is None: raise exception.TenantNotFound(tenant_id=tenant_id) tenant_ref = self.identity_api.update_tenant( context, tenant_id, tenant) return {'tenant': tenant_ref} def delete_tenant(self, context, tenant_id, **kw): self.assert_admin(context) if self.identity_api.get_tenant(context, tenant_id) is None: raise exception.TenantNotFound(tenant_id=tenant_id) self.identity_api.delete_tenant(context, tenant_id) def get_tenant_users(self, context, tenant_id, **kw): self.assert_admin(context) if self.identity_api.get_tenant(context, tenant_id) is None: raise exception.TenantNotFound(tenant_id=tenant_id) user_refs = self.identity_api.get_tenant_users(context, tenant_id) return {'users': user_refs} def _format_tenant_list(self, tenant_refs, **kwargs): marker = kwargs.get('marker') page_idx = 0 if marker is not None: for (marker_idx, tenant) in enumerate(tenant_refs): if tenant['id'] == marker: # we start pagination after the marker page_idx = marker_idx + 1 break else: msg = 'Marker could not be found' raise exception.ValidationError(message=msg) limit = kwargs.get('limit') if limit is not None: try: limit = int(limit) if limit < 0: raise AssertionError() except (ValueError, AssertionError): msg = 'Invalid limit value' raise exception.ValidationError(message=msg) tenant_refs = tenant_refs[page_idx:limit] for x in tenant_refs: if 'enabled' not in x: x['enabled'] = True o = {'tenants': tenant_refs, 'tenants_links': []} return o class UserController(wsgi.Application): def __init__(self): self.identity_api = Manager() self.policy_api = policy.Manager() self.token_api = token.Manager() super(UserController, self).__init__() def get_user(self, context, user_id): self.assert_admin(context) user_ref = self.identity_api.get_user(context, user_id) if not user_ref: raise exception.UserNotFound(user_id=user_id) return {'user': user_ref} def get_users(self, context): # NOTE(termie): i can't imagine that this really wants all the data # about every single user in the system... 
self.assert_admin(context) user_refs = self.identity_api.list_users(context) return {'users': user_refs} # CRUD extension def create_user(self, context, user): user = self._normalize_dict(user) self.assert_admin(context) tenant_id = user.get('tenantId', None) if (tenant_id is not None and self.identity_api.get_tenant(context, tenant_id) is None): raise exception.TenantNotFound(tenant_id=tenant_id) user_id = uuid.uuid4().hex user_ref = user.copy() user_ref['id'] = user_id new_user_ref = self.identity_api.create_user( context, user_id, user_ref) if tenant_id: self.identity_api.add_user_to_tenant(context, tenant_id, user_id) return {'user': new_user_ref} def update_user(self, context, user_id, user): # NOTE(termie): this is really more of a patch than a put self.assert_admin(context) if self.identity_api.get_user(context, user_id) is None: raise exception.UserNotFound(user_id=user_id) user_ref = self.identity_api.update_user(context, user_id, user) return {'user': user_ref} def delete_user(self, context, user_id): self.assert_admin(context) if self.identity_api.get_user(context, user_id) is None: raise exception.UserNotFound(user_id=user_id) self.identity_api.delete_user(context, user_id) def set_user_enabled(self, context, user_id, user): return self.update_user(context, user_id, user) def set_user_password(self, context, user_id, user): return self.update_user(context, user_id, user) def update_user_tenant(self, context, user_id, user): """Update the default tenant.""" # ensure that we're a member of that tenant tenant_id = user.get('tenantId') self.identity_api.add_user_to_tenant(context, tenant_id, user_id) return self.update_user(context, user_id, user) class RoleController(wsgi.Application): def __init__(self): self.identity_api = Manager() self.token_api = token.Manager() self.policy_api = policy.Manager() super(RoleController, self).__init__() # COMPAT(essex-3) def get_user_roles(self, context, user_id, tenant_id=None): """Get the roles for a user and tenant pair. Since we're trying to ignore the idea of user-only roles we're not implementing them in hopes that the idea will die off. """ if tenant_id is None: raise exception.NotImplemented(message='User roles not supported: ' 'tenant ID required') user = self.identity_api.get_user(context, user_id) if user is None: raise exception.UserNotFound(user_id=user_id) tenant = self.identity_api.get_tenant(context, tenant_id) if tenant is None: raise exception.TenantNotFound(tenant_id=tenant_id) roles = self.identity_api.get_roles_for_user_and_tenant( context, user_id, tenant_id) return {'roles': [self.identity_api.get_role(context, x) for x in roles]} # CRUD extension def get_role(self, context, role_id): self.assert_admin(context) role_ref = self.identity_api.get_role(context, role_id) if not role_ref: raise exception.RoleNotFound(role_id=role_id) return {'role': role_ref} def create_role(self, context, role): role = self._normalize_dict(role) self.assert_admin(context) role_id = uuid.uuid4().hex role['id'] = role_id role_ref = self.identity_api.create_role(context, role_id, role) return {'role': role_ref} def delete_role(self, context, role_id): self.assert_admin(context) self.get_role(context, role_id) self.identity_api.delete_role(context, role_id) def get_roles(self, context): self.assert_admin(context) roles = self.identity_api.list_roles(context) # TODO(termie): probably inefficient at some point return {'roles': roles} def add_role_to_user(self, context, user_id, role_id, tenant_id=None): """Add a role to a user and tenant pair. 
Since we're trying to ignore the idea of user-only roles we're not implementing them in hopes that the idea will die off. """ self.assert_admin(context) if tenant_id is None: raise exception.NotImplemented(message='User roles not supported: ' 'tenant_id required') if self.identity_api.get_user(context, user_id) is None: raise exception.UserNotFound(user_id=user_id) if self.identity_api.get_tenant(context, tenant_id) is None: raise exception.TenantNotFound(tenant_id=tenant_id) if self.identity_api.get_role(context, role_id) is None: raise exception.RoleNotFound(role_id=role_id) # This still has the weird legacy semantics that adding a role to # a user also adds them to a tenant self.identity_api.add_user_to_tenant(context, tenant_id, user_id) self.identity_api.add_role_to_user_and_tenant( context, user_id, tenant_id, role_id) role_ref = self.identity_api.get_role(context, role_id) return {'role': role_ref} def remove_role_from_user(self, context, user_id, role_id, tenant_id=None): """Remove a role from a user and tenant pair. Since we're trying to ignore the idea of user-only roles we're not implementing them in hopes that the idea will die off. """ self.assert_admin(context) if tenant_id is None: raise exception.NotImplemented(message='User roles not supported: ' 'tenant_id required') if self.identity_api.get_user(context, user_id) is None: raise exception.UserNotFound(user_id=user_id) if self.identity_api.get_tenant(context, tenant_id) is None: raise exception.TenantNotFound(tenant_id=tenant_id) if self.identity_api.get_role(context, role_id) is None: raise exception.RoleNotFound(role_id=role_id) # This still has the weird legacy semantics that adding a role to # a user also adds them to a tenant, so we must follow up on that self.identity_api.remove_role_from_user_and_tenant( context, user_id, tenant_id, role_id) roles = self.identity_api.get_roles_for_user_and_tenant( context, user_id, tenant_id) if not roles: self.identity_api.remove_user_from_tenant( context, tenant_id, user_id) return # COMPAT(diablo): CRUD extension def get_role_refs(self, context, user_id): """Ultimate hack to get around having to make role_refs first-class. This will basically iterate over the various roles the user has in all tenants the user is a member of and create fake role_refs where the id encodes the user-tenant-role information so we can look up the appropriate data when we need to delete them. """ self.assert_admin(context) user_ref = self.identity_api.get_user(context, user_id) tenant_ids = self.identity_api.get_tenants_for_user(context, user_id) o = [] for tenant_id in tenant_ids: role_ids = self.identity_api.get_roles_for_user_and_tenant( context, user_id, tenant_id) for role_id in role_ids: ref = {'roleId': role_id, 'tenantId': tenant_id, 'userId': user_id} ref['id'] = urllib.urlencode(ref) o.append(ref) return {'roles': o} # COMPAT(diablo): CRUD extension def create_role_ref(self, context, user_id, role): """This is actually used for adding a user to a tenant. In the legacy data model adding a user to a tenant required setting a role. 
""" self.assert_admin(context) # TODO(termie): for now we're ignoring the actual role tenant_id = role.get('tenantId') role_id = role.get('roleId') self.identity_api.add_user_to_tenant(context, tenant_id, user_id) self.identity_api.add_role_to_user_and_tenant( context, user_id, tenant_id, role_id) role_ref = self.identity_api.get_role(context, role_id) return {'role': role_ref} # COMPAT(diablo): CRUD extension def delete_role_ref(self, context, user_id, role_ref_id): """This is actually used for deleting a user from a tenant. In the legacy data model removing a user from a tenant required deleting a role. To emulate this, we encode the tenant and role in the role_ref_id, and if this happens to be the last role for the user-tenant pair, we remove the user from the tenant. """ self.assert_admin(context) # TODO(termie): for now we're ignoring the actual role role_ref_ref = urlparse.parse_qs(role_ref_id) tenant_id = role_ref_ref.get('tenantId')[0] role_id = role_ref_ref.get('roleId')[0] self.identity_api.remove_role_from_user_and_tenant( context, user_id, tenant_id, role_id) roles = self.identity_api.get_roles_for_user_and_tenant( context, user_id, tenant_id) if not roles: self.identity_api.remove_user_from_tenant( context, tenant_id, user_id)
./CrossVul/dataset_final_sorted/CWE-264/py/bad_3693_0
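The diablo-compat CRUD extension above leans on a small trick worth seeing in isolation: because role assignments were never first-class objects, get_role_refs() fabricates an id by url-encoding the (user, tenant, role) triple, and delete_role_ref() recovers the pieces with urlparse.parse_qs. A minimal round trip, written in the same Python 2 idiom as the file (under Python 3 both functions live in urllib.parse); the sample values are made up for illustration:

import urllib
import urlparse

ref = {'roleId': 'r-123', 'tenantId': 't-456', 'userId': 'u-789'}
role_ref_id = urllib.urlencode(ref)
# e.g. 'roleId=r-123&tenantId=t-456&userId=u-789' (key order may vary)

parsed = urlparse.parse_qs(role_ref_id)
# parse_qs maps each key to a *list* of values, hence the [0] indexing
tenant_id = parsed.get('tenantId')[0]
role_id = parsed.get('roleId')[0]
assert (tenant_id, role_id) == ('t-456', 'r-123')

As in delete_role_ref(), the .get(...)[0] pattern raises a bare TypeError rather than a clean validation error when a malformed id omits a key, since .get returns None in that case.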
crossvul-python_data_good_5539_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Piston Cloud Computing, Inc. # Copyright 2012 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Handles all requests relating to compute resources (e.g. guest VMs, networking and storage of VMs, and compute hosts on which they run).""" import base64 import functools import re import string import time import urllib from nova import block_device from nova.compute import instance_types from nova.compute import power_state from nova.compute import rpcapi as compute_rpcapi from nova.compute import task_states from nova.compute import utils as compute_utils from nova.compute import vm_states from nova.consoleauth import rpcapi as consoleauth_rpcapi from nova import crypto from nova.db import base from nova import exception from nova import flags from nova.image import glance from nova import network from nova import notifications from nova.openstack.common import excutils from nova.openstack.common import importutils from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.openstack.common import timeutils import nova.policy from nova import quota from nova.scheduler import rpcapi as scheduler_rpcapi from nova import utils from nova import volume LOG = logging.getLogger(__name__) FLAGS = flags.FLAGS flags.DECLARE('consoleauth_topic', 'nova.consoleauth') MAX_USERDATA_SIZE = 65535 QUOTAS = quota.QUOTAS def check_instance_state(vm_state=None, task_state=(None,)): """Decorator to check VM and/or task state before entry to API functions. If the instance is in the wrong state, the wrapper will raise an exception. 
""" if vm_state is not None and not isinstance(vm_state, set): vm_state = set(vm_state) if task_state is not None and not isinstance(task_state, set): task_state = set(task_state) def outer(f): @functools.wraps(f) def inner(self, context, instance, *args, **kw): if vm_state is not None and instance['vm_state'] not in vm_state: raise exception.InstanceInvalidState( attr='vm_state', instance_uuid=instance['uuid'], state=instance['vm_state'], method=f.__name__) if (task_state is not None and instance['task_state'] not in task_state): raise exception.InstanceInvalidState( attr='task_state', instance_uuid=instance['uuid'], state=instance['task_state'], method=f.__name__) return f(self, context, instance, *args, **kw) return inner return outer def check_instance_lock(function): @functools.wraps(function) def inner(self, context, instance, *args, **kwargs): if instance['locked'] and not context.is_admin: raise exception.InstanceIsLocked(instance_uuid=instance['uuid']) return function(self, context, instance, *args, **kwargs) return inner def policy_decorator(scope): """Check corresponding policy prior of wrapped method to execution""" def outer(func): @functools.wraps(func) def wrapped(self, context, target, *args, **kwargs): check_policy(context, func.__name__, target, scope) return func(self, context, target, *args, **kwargs) return wrapped return outer wrap_check_policy = policy_decorator(scope='compute') wrap_check_security_groups_policy = policy_decorator( scope='compute:security_groups') def check_policy(context, action, target, scope='compute'): _action = '%s:%s' % (scope, action) nova.policy.enforce(context, _action, target) class API(base.Base): """API for interacting with the compute manager.""" def __init__(self, image_service=None, network_api=None, volume_api=None, security_group_api=None, **kwargs): self.image_service = (image_service or glance.get_default_image_service()) self.network_api = network_api or network.API() self.volume_api = volume_api or volume.API() self.security_group_api = security_group_api or SecurityGroupAPI() self.sgh = importutils.import_object(FLAGS.security_group_handler) self.consoleauth_rpcapi = consoleauth_rpcapi.ConsoleAuthAPI() self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI() self.compute_rpcapi = compute_rpcapi.ComputeAPI() super(API, self).__init__(**kwargs) def _instance_update(self, context, instance_uuid, **kwargs): """Update an instance in the database using kwargs as value.""" (old_ref, instance_ref) = self.db.instance_update_and_get_original( context, instance_uuid, kwargs) notifications.send_update(context, old_ref, instance_ref) return instance_ref def _check_injected_file_quota(self, context, injected_files): """Enforce quota limits on injected files. Raises a QuotaError if any limit is exceeded. """ if injected_files is None: return # Check number of files first try: QUOTAS.limit_check(context, injected_files=len(injected_files)) except exception.OverQuota: raise exception.OnsetFileLimitExceeded() # OK, now count path and content lengths; we're looking for # the max... 
max_path = 0 max_content = 0 for path, content in injected_files: max_path = max(max_path, len(path)) max_content = max(max_content, len(content)) try: QUOTAS.limit_check(context, injected_file_path_bytes=max_path, injected_file_content_bytes=max_content) except exception.OverQuota as exc: # Favor path limit over content limit for reporting # purposes if 'injected_file_path_bytes' in exc.kwargs['overs']: raise exception.OnsetFilePathLimitExceeded() else: raise exception.OnsetFileContentLimitExceeded() def _check_num_instances_quota(self, context, instance_type, min_count, max_count): """Enforce quota limits on number of instances created.""" # Determine requested cores and ram req_cores = max_count * instance_type['vcpus'] req_ram = max_count * instance_type['memory_mb'] # Check the quota try: reservations = QUOTAS.reserve(context, instances=max_count, cores=req_cores, ram=req_ram) except exception.OverQuota as exc: # OK, we exceeded quota; let's figure out why... quotas = exc.kwargs['quotas'] usages = exc.kwargs['usages'] overs = exc.kwargs['overs'] headroom = dict((res, quotas[res] - (usages[res]['in_use'] + usages[res]['reserved'])) for res in quotas.keys()) allowed = headroom['instances'] # Reduce 'allowed' instances in line with the cores & ram headroom if instance_type['vcpus']: allowed = min(allowed, headroom['cores'] // instance_type['vcpus']) if instance_type['memory_mb']: allowed = min(allowed, headroom['ram'] // instance_type['memory_mb']) # Convert to the appropriate exception message if allowed <= 0: msg = _("Cannot run any more instances of this type.") allowed = 0 elif min_count <= allowed <= max_count: # We're actually OK, but still need reservations return self._check_num_instances_quota(context, instance_type, min_count, allowed) else: msg = (_("Can only run %s more instances of this type.") % allowed) resource = overs[0] used = quotas[resource] - headroom[resource] total_allowed = used + headroom[resource] overs = ','.join(overs) pid = context.project_id LOG.warn(_("%(overs)s quota exceeded for %(pid)s," " tried to run %(min_count)s instances. 
%(msg)s"), locals()) requested = dict(instances=min_count, cores=req_cores, ram=req_ram) raise exception.TooManyInstances(overs=overs, req=requested[resource], used=used, allowed=total_allowed, resource=resource) return max_count, reservations def _check_metadata_properties_quota(self, context, metadata=None): """Enforce quota limits on metadata properties.""" if not metadata: metadata = {} num_metadata = len(metadata) try: QUOTAS.limit_check(context, metadata_items=num_metadata) except exception.OverQuota as exc: pid = context.project_id LOG.warn(_("Quota exceeded for %(pid)s, tried to set " "%(num_metadata)s metadata properties") % locals()) quota_metadata = exc.kwargs['quotas']['metadata_items'] raise exception.MetadataLimitExceeded(allowed=quota_metadata) # Because metadata is stored in the DB, we hard-code the size limits # In future, we may support more variable length strings, so we act # as if this is quota-controlled for forwards compatibility for k, v in metadata.iteritems(): if len(k) == 0: msg = _("Metadata property key blank") LOG.warn(msg) raise exception.InvalidMetadata(reason=msg) if len(k) > 255: msg = _("Metadata property key greater than 255 characters") LOG.warn(msg) raise exception.InvalidMetadataSize(reason=msg) if len(v) > 255: msg = _("Metadata property value greater than 255 characters") LOG.warn(msg) raise exception.InvalidMetadataSize(reason=msg) def _check_requested_networks(self, context, requested_networks): """ Check if the networks requested belongs to the project and the fixed IP address for each network provided is within same the network block """ if requested_networks is None: return self.network_api.validate_networks(context, requested_networks) @staticmethod def _handle_kernel_and_ramdisk(context, kernel_id, ramdisk_id, image, image_service): """Choose kernel and ramdisk appropriate for the instance. The kernel and ramdisk can be chosen in one of three ways: 1. Passed in with create-instance request. 2. Inherited from image. 3. Forced to None by using `null_kernel` FLAG. """ # Inherit from image if not specified if kernel_id is None: kernel_id = image['properties'].get('kernel_id') if ramdisk_id is None: ramdisk_id = image['properties'].get('ramdisk_id') # Force to None if using null_kernel if kernel_id == str(FLAGS.null_kernel): kernel_id = None ramdisk_id = None # Verify kernel and ramdisk exist (fail-fast) if kernel_id is not None: image_service.show(context, kernel_id) if ramdisk_id is not None: image_service.show(context, ramdisk_id) return kernel_id, ramdisk_id @staticmethod def _handle_availability_zone(availability_zone): # NOTE(vish): We have a legacy hack to allow admins to specify hosts # via az using az:host. It might be nice to expose an # api to specify specific hosts to force onto, but for # now it just supports this legacy hack. 
forced_host = None if availability_zone and ':' in availability_zone: availability_zone, forced_host = availability_zone.split(':') if not availability_zone: availability_zone = FLAGS.default_schedule_zone return availability_zone, forced_host @staticmethod def _inherit_properties_from_image(image, auto_disk_config): def prop(prop_, prop_type=None): """Return the value of an image property.""" value = image['properties'].get(prop_) if value is not None: if prop_type == 'bool': value = utils.bool_from_str(value) return value options_from_image = {'os_type': prop('os_type'), 'architecture': prop('arch'), 'vm_mode': prop('vm_mode')} # If instance doesn't have auto_disk_config overridden by request, use # whatever the image indicates if auto_disk_config is None: auto_disk_config = prop('auto_disk_config', prop_type='bool') options_from_image['auto_disk_config'] = auto_disk_config return options_from_image def _create_instance(self, context, instance_type, image_href, kernel_id, ramdisk_id, min_count, max_count, display_name, display_description, key_name, key_data, security_group, availability_zone, user_data, metadata, injected_files, admin_password, access_ip_v4, access_ip_v6, requested_networks, config_drive, block_device_mapping, auto_disk_config, reservation_id=None, scheduler_hints=None): """Verify all the input parameters regardless of the provisioning strategy being performed and schedule the instance(s) for creation.""" if not metadata: metadata = {} if not security_group: security_group = 'default' if not instance_type: instance_type = instance_types.get_default_instance_type() if not min_count: min_count = 1 if not max_count: max_count = min_count block_device_mapping = block_device_mapping or [] if instance_type['disabled']: raise exception.InstanceTypeNotFound( instance_type_id=instance_type['id']) # Reserve quotas num_instances, quota_reservations = self._check_num_instances_quota( context, instance_type, min_count, max_count) # Try to create the instance try: instances = [] instance_uuids = [] self._check_metadata_properties_quota(context, metadata) self._check_injected_file_quota(context, injected_files) self._check_requested_networks(context, requested_networks) (image_service, image_id) = glance.get_remote_image_service( context, image_href) image = image_service.show(context, image_id) if instance_type['memory_mb'] < int(image.get('min_ram') or 0): raise exception.InstanceTypeMemoryTooSmall() if instance_type['root_gb'] < int(image.get('min_disk') or 0): raise exception.InstanceTypeDiskTooSmall() # Handle config_drive config_drive_id = None if config_drive and config_drive is not True: # config_drive is volume id config_drive_id = config_drive config_drive = None # Ensure config_drive image exists image_service.show(context, config_drive_id) kernel_id, ramdisk_id = self._handle_kernel_and_ramdisk( context, kernel_id, ramdisk_id, image, image_service) if key_data is None and key_name: key_pair = self.db.key_pair_get(context, context.user_id, key_name) key_data = key_pair['public_key'] if reservation_id is None: reservation_id = utils.generate_uid('r') # grab the architecture from glance architecture = image['properties'].get('architecture', 'Unknown') root_device_name = block_device.properties_root_device_name( image['properties']) availability_zone, forced_host = self._handle_availability_zone( availability_zone) base_options = { 'reservation_id': reservation_id, 'image_ref': image_href, 'kernel_id': kernel_id or '', 'ramdisk_id': ramdisk_id or '', 'power_state': 
power_state.NOSTATE, 'vm_state': vm_states.BUILDING, 'config_drive_id': config_drive_id or '', 'config_drive': config_drive or '', 'user_id': context.user_id, 'project_id': context.project_id, 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()), 'instance_type_id': instance_type['id'], 'memory_mb': instance_type['memory_mb'], 'vcpus': instance_type['vcpus'], 'root_gb': instance_type['root_gb'], 'ephemeral_gb': instance_type['ephemeral_gb'], 'display_name': display_name, 'display_description': display_description or '', 'user_data': user_data, 'key_name': key_name, 'key_data': key_data, 'locked': False, 'metadata': metadata, 'access_ip_v4': access_ip_v4, 'access_ip_v6': access_ip_v6, 'availability_zone': availability_zone, 'root_device_name': root_device_name, 'architecture': architecture, 'progress': 0} if user_data: l = len(user_data) if l > MAX_USERDATA_SIZE: # NOTE(mikal): user_data is stored in a text column, and # the database might silently truncate if it's over length. raise exception.InstanceUserDataTooLarge( length=l, maxsize=MAX_USERDATA_SIZE) try: base64.decodestring(user_data) except base64.binascii.Error: raise exception.InstanceUserDataMalformed() options_from_image = self._inherit_properties_from_image( image, auto_disk_config) base_options.update(options_from_image) LOG.debug(_("Going to run %s instances...") % num_instances) filter_properties = dict(scheduler_hints=scheduler_hints) if context.is_admin and forced_host: filter_properties['force_hosts'] = [forced_host] for i in xrange(num_instances): options = base_options.copy() instance = self.create_db_entry_for_new_instance( context, instance_type, image, options, security_group, block_device_mapping) instances.append(instance) instance_uuids.append(instance['uuid']) self._validate_bdm(context, instance) # send a state update notification for the initial create to # show it going from non-existent to BUILDING notifications.send_update_with_states(context, instance, None, vm_states.BUILDING, None, None, service="api") # In the case of any exceptions, attempt DB cleanup and rollback the # quota reservations. except Exception: with excutils.save_and_reraise_exception(): try: for instance_uuid in instance_uuids: self.db.instance_destroy(context, instance_uuid) finally: QUOTAS.rollback(context, quota_reservations) # Commit the reservations QUOTAS.commit(context, quota_reservations) request_spec = { 'image': jsonutils.to_primitive(image), 'instance_properties': base_options, 'instance_type': instance_type, 'instance_uuids': instance_uuids, 'block_device_mapping': block_device_mapping, 'security_group': security_group, } self.scheduler_rpcapi.run_instance(context, request_spec=request_spec, admin_password=admin_password, injected_files=injected_files, requested_networks=requested_networks, is_first_time=True, filter_properties=filter_properties) return (instances, reservation_id) @staticmethod def _volume_size(instance_type, virtual_name): size = 0 if virtual_name == 'swap': size = instance_type.get('swap', 0) elif block_device.is_ephemeral(virtual_name): num = block_device.ephemeral_num(virtual_name) # TODO(yamahata): ephemeralN where N > 0 # Only ephemeral0 is allowed for now because the InstanceTypes # table only allows a single local disk, ephemeral_gb. # In order to enhance it, we need to add new columns to the # instance_types table.
if num > 0: return 0 size = instance_type.get('ephemeral_gb') return size def _update_image_block_device_mapping(self, elevated_context, instance_type, instance_uuid, mappings): """tell vm driver to create ephemeral/swap device at boot time by updating BlockDeviceMapping """ for bdm in block_device.mappings_prepend_dev(mappings): LOG.debug(_("bdm %s"), bdm, instance_uuid=instance_uuid) virtual_name = bdm['virtual'] if virtual_name == 'ami' or virtual_name == 'root': continue if not block_device.is_swap_or_ephemeral(virtual_name): continue size = self._volume_size(instance_type, virtual_name) if size == 0: continue values = { 'instance_uuid': instance_uuid, 'device_name': bdm['device'], 'virtual_name': virtual_name, 'volume_size': size} self.db.block_device_mapping_update_or_create(elevated_context, values) def _update_block_device_mapping(self, elevated_context, instance_type, instance_uuid, block_device_mapping): """tell vm driver to attach volume at boot time by updating BlockDeviceMapping """ LOG.debug(_("block_device_mapping %s"), block_device_mapping, instance_uuid=instance_uuid) for bdm in block_device_mapping: assert 'device_name' in bdm values = {'instance_uuid': instance_uuid} for key in ('device_name', 'delete_on_termination', 'virtual_name', 'snapshot_id', 'volume_id', 'volume_size', 'no_device'): values[key] = bdm.get(key) virtual_name = bdm.get('virtual_name') if (virtual_name is not None and block_device.is_swap_or_ephemeral(virtual_name)): size = self._volume_size(instance_type, virtual_name) if size == 0: continue values['volume_size'] = size # NOTE(yamahata): NoDevice eliminates devices defined in image # files by command line option. # (--block-device-mapping) if virtual_name == 'NoDevice': values['no_device'] = True for k in ('delete_on_termination', 'virtual_name', 'snapshot_id', 'volume_id', 'volume_size'): values[k] = None self.db.block_device_mapping_update_or_create(elevated_context, values) def _validate_bdm(self, context, instance): for bdm in self.db.block_device_mapping_get_all_by_instance( context, instance['uuid']): # NOTE(vish): For now, just make sure the volumes are accessible. snapshot_id = bdm.get('snapshot_id') volume_id = bdm.get('volume_id') if volume_id is not None: try: self.volume_api.get(context, volume_id) except Exception: raise exception.InvalidBDMVolume(id=volume_id) elif snapshot_id is not None: try: self.volume_api.get_snapshot(context, snapshot_id) except Exception: raise exception.InvalidBDMSnapshot(id=snapshot_id) def _populate_instance_for_bdm(self, context, instance, instance_type, image, block_device_mapping): """Populate instance block device mapping information.""" # FIXME(comstud): Why do the block_device_mapping DB calls # require elevated context? 
elevated = context.elevated() instance_uuid = instance['uuid'] mappings = image['properties'].get('mappings', []) if mappings: self._update_image_block_device_mapping(elevated, instance_type, instance_uuid, mappings) image_bdm = image['properties'].get('block_device_mapping', []) for mapping in (image_bdm, block_device_mapping): if not mapping: continue self._update_block_device_mapping(elevated, instance_type, instance_uuid, mapping) def _populate_instance_shutdown_terminate(self, instance, image, block_device_mapping): """Populate instance shutdown_terminate information.""" if (block_device_mapping or image['properties'].get('mappings') or image['properties'].get('block_device_mapping')): instance['shutdown_terminate'] = False def _populate_instance_names(self, instance): """Populate instance display_name and hostname.""" display_name = instance.get('display_name') hostname = instance.get('hostname') if display_name is None: display_name = self._default_display_name(instance['uuid']) instance['display_name'] = display_name if hostname is None: hostname = display_name instance['hostname'] = utils.sanitize_hostname(hostname) def _default_display_name(self, instance_uuid): return "Server %s" % instance_uuid def _populate_instance_for_create(self, base_options, image, security_groups): """Build the beginning of a new instance.""" instance = base_options if not instance.get('uuid'): # Generate the instance_uuid here so we can use it # for additional setup before creating the DB entry. instance['uuid'] = str(utils.gen_uuid()) instance['launch_index'] = 0 instance['vm_state'] = vm_states.BUILDING instance['task_state'] = task_states.SCHEDULING instance['architecture'] = image['properties'].get('architecture') instance['info_cache'] = {'network_info': '[]'} # Store image properties so we can use them later # (for notifications, etc). Only store what we can. instance.setdefault('system_metadata', {}) for key, value in image['properties'].iteritems(): new_value = str(value)[:255] instance['system_metadata']['image_%s' % key] = new_value # Keep a record of the original base image that this # image's instance is derived from: base_image_ref = image['properties'].get('base_image_ref') if not base_image_ref: # base image ref property not previously set through a snapshot. # default to using the image ref as the base: base_image_ref = base_options['image_ref'] instance['system_metadata']['image_base_image_ref'] = base_image_ref # Use 'default' security_group if none specified. if security_groups is None: security_groups = ['default'] elif not isinstance(security_groups, list): security_groups = [security_groups] instance['security_groups'] = security_groups return instance #NOTE(bcwaldon): No policy check since this is only used by scheduler and # the compute api. That should probably be cleaned up, though. def create_db_entry_for_new_instance(self, context, instance_type, image, base_options, security_group, block_device_mapping): """Create an entry in the DB for this new instance, including any related table updates (such as security group, etc). This is called by the scheduler after a location for the instance has been determined. """ instance = self._populate_instance_for_create(base_options, image, security_group) self._populate_instance_names(instance) self._populate_instance_shutdown_terminate(instance, image, block_device_mapping) # ensure_default security group is called before the instance # is created so the creation of the default security group is # proxied to the sgh. 
self.security_group_api.ensure_default(context) instance = self.db.instance_create(context, instance) self._populate_instance_for_bdm(context, instance, instance_type, image, block_device_mapping) return instance def _check_create_policies(self, context, availability_zone, requested_networks, block_device_mapping): """Check policies for create().""" target = {'project_id': context.project_id, 'user_id': context.user_id, 'availability_zone': availability_zone} check_policy(context, 'create', target) if requested_networks: check_policy(context, 'create:attach_network', target) if block_device_mapping: check_policy(context, 'create:attach_volume', target) def create(self, context, instance_type, image_href, kernel_id=None, ramdisk_id=None, min_count=None, max_count=None, display_name=None, display_description=None, key_name=None, key_data=None, security_group=None, availability_zone=None, user_data=None, metadata=None, injected_files=None, admin_password=None, block_device_mapping=None, access_ip_v4=None, access_ip_v6=None, requested_networks=None, config_drive=None, auto_disk_config=None, scheduler_hints=None): """ Provision instances, sending instance information to the scheduler. The scheduler will determine where the instance(s) go and will handle creating the DB entries. Returns a tuple of (instances, reservation_id) """ self._check_create_policies(context, availability_zone, requested_networks, block_device_mapping) return self._create_instance( context, instance_type, image_href, kernel_id, ramdisk_id, min_count, max_count, display_name, display_description, key_name, key_data, security_group, availability_zone, user_data, metadata, injected_files, admin_password, access_ip_v4, access_ip_v6, requested_networks, config_drive, block_device_mapping, auto_disk_config, scheduler_hints=scheduler_hints) def trigger_provider_fw_rules_refresh(self, context): """Called when a rule is added/removed from a provider firewall""" hosts = [x['host'] for (x, idx) in self.db.service_get_all_compute_sorted(context)] for host in hosts: self.compute_rpcapi.refresh_provider_fw_rules(context, host) @wrap_check_policy def update(self, context, instance, **kwargs): """Updates the instance in the datastore. :param context: The security context :param instance: The instance to update :param kwargs: All additional keyword args are treated as data fields of the instance to be updated :returns: None """ _, updated = self._update(context, instance, **kwargs) return updated def _update(self, context, instance, **kwargs): # Update the instance record and send a state update notification # if task or vm state changed old_ref, instance_ref = self.db.instance_update_and_get_original( context, instance['uuid'], kwargs) notifications.send_update(context, old_ref, instance_ref, service="api") return dict(old_ref.iteritems()), dict(instance_ref.iteritems()) @wrap_check_policy @check_instance_lock @check_instance_state(vm_state=None, task_state=None) def soft_delete(self, context, instance): """Terminate an instance.""" LOG.debug(_('Going to try to soft delete instance'), instance=instance) if instance['disable_terminate']: return # NOTE(jerdfelt): The compute daemon handles reclaiming instances # that are in soft delete. If there is no host assigned, there is # no daemon to reclaim, so delete it immediately. 
if instance['host']: instance = self.update(context, instance, task_state=task_states.POWERING_OFF, expected_task_state=None, deleted_at=timeutils.utcnow()) self.compute_rpcapi.power_off_instance(context, instance) else: LOG.warning(_('No host for instance, deleting immediately'), instance=instance) try: self.db.instance_destroy(context, instance['uuid']) except exception.InstanceNotFound: # NOTE(comstud): Race condition. Instance already gone. pass def _delete(self, context, instance): host = instance['host'] reservations = None try: #Note(maoy): no expected_task_state needs to be set old, updated = self._update(context, instance, task_state=task_states.DELETING, progress=0) # Avoid double-counting the quota usage reduction # where delete is already in progress if old['task_state'] != task_states.DELETING: reservations = QUOTAS.reserve(context, instances=-1, cores=-instance['vcpus'], ram=-instance['memory_mb']) if not host: # Just update database, nothing else we can do constraint = self.db.constraint(host=self.db.equal_any(host)) try: result = self.db.instance_destroy(context, instance['uuid'], constraint) if reservations: QUOTAS.commit(context, reservations) return result except exception.ConstraintNotMet: # Refresh to get new host information instance = self.get(context, instance['uuid']) if instance['vm_state'] == vm_states.RESIZED: # If in the middle of a resize, use confirm_resize to # ensure the original instance is cleaned up too get_migration = self.db.migration_get_by_instance_and_status try: migration_ref = get_migration(context.elevated(), instance['uuid'], 'finished') except exception.MigrationNotFoundByStatus: migration_ref = None if migration_ref: src_host = migration_ref['source_compute'] # Call since this can race with the terminate_instance. # The resize is done but awaiting confirmation/reversion, # so there are two cases: # 1. up-resize: here -instance['vcpus'/'memory_mb'] match # the quota usages accounted for this instance, # so no further quota adjustment is needed # 2. down-resize: here -instance['vcpus'/'memory_mb'] are # shy by delta(old, new) from the quota usages accounted # for this instance, so we must adjust deltas = self._downsize_quota_delta(context, migration_ref) downsize_reservations = self._reserve_quota_delta(context, deltas) self.compute_rpcapi.confirm_resize(context.elevated(), instance, migration_ref['id'], host=src_host, cast=False, reservations=downsize_reservations) is_up = False bdms = self.db.block_device_mapping_get_all_by_instance( context, instance["uuid"]) #Note(jogo): db allows for multiple compute services per host try: services = self.db.service_get_all_compute_by_host( context.elevated(), instance['host']) except exception.ComputeHostNotFound: services = [] for service in services: if utils.service_is_up(service): is_up = True self.compute_rpcapi.terminate_instance(context, instance) break if not is_up: # If compute node isn't up, just delete from DB self._local_delete(context, instance, bdms) if reservations: QUOTAS.commit(context, reservations) except exception.InstanceNotFound: # NOTE(comstud): Race condition. Instance already gone. 
if reservations: QUOTAS.rollback(context, reservations) except Exception: with excutils.save_and_reraise_exception(): if reservations: QUOTAS.rollback(context, reservations) def _local_delete(self, context, instance, bdms): LOG.warning(_('host for instance is down, deleting from ' 'database'), instance=instance) instance_uuid = instance['uuid'] self.db.instance_info_cache_delete(context, instance_uuid) compute_utils.notify_about_instance_usage( context, instance, "delete.start") elevated = context.elevated() self.network_api.deallocate_for_instance(elevated, instance) system_meta = self.db.instance_system_metadata_get(context, instance_uuid) # cleanup volumes for bdm in bdms: if bdm['volume_id']: volume = self.volume_api.get(context, bdm['volume_id']) # NOTE(vish): We don't have access to correct volume # connector info, so just pass a fake # connector. This can be improved when we # expose get_volume_connector to rpc. connector = {'ip': '127.0.0.1', 'initiator': 'iqn.fake'} self.volume_api.terminate_connection(context, volume, connector) self.volume_api.detach(elevated, volume) if bdm['delete_on_termination']: self.volume_api.delete(context, volume) self.db.block_device_mapping_destroy(context, bdm['id']) instance = self._instance_update(context, instance_uuid, vm_state=vm_states.DELETED, task_state=None, terminated_at=timeutils.utcnow()) self.db.instance_destroy(context, instance_uuid) compute_utils.notify_about_instance_usage( context, instance, "delete.end", system_metadata=system_meta) # NOTE(maoy): we allow delete to be called no matter what vm_state says. @wrap_check_policy @check_instance_lock @check_instance_state(vm_state=None, task_state=None) def delete(self, context, instance): """Terminate an instance.""" LOG.debug(_("Going to try to terminate instance"), instance=instance) if instance['disable_terminate']: return self._delete(context, instance) @wrap_check_policy @check_instance_lock @check_instance_state(vm_state=[vm_states.SOFT_DELETED]) def restore(self, context, instance): """Restore a previously deleted (but not reclaimed) instance.""" if instance['host']: instance = self.update(context, instance, task_state=task_states.POWERING_ON, expected_task_state=None, deleted_at=None) self.compute_rpcapi.power_on_instance(context, instance) else: self.update(context, instance, vm_state=vm_states.ACTIVE, task_state=None, expected_task_state=None, deleted_at=None) @wrap_check_policy @check_instance_lock @check_instance_state(vm_state=[vm_states.SOFT_DELETED]) def force_delete(self, context, instance): """Force delete a previously deleted (but not reclaimed) instance.""" self._delete(context, instance) @wrap_check_policy @check_instance_lock @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED, vm_states.ERROR, vm_states.STOPPED], task_state=[None]) def stop(self, context, instance, do_cast=True): """Stop an instance.""" LOG.debug(_("Going to try to stop instance"), instance=instance) instance = self.update(context, instance, task_state=task_states.STOPPING, expected_task_state=None, progress=0) self.compute_rpcapi.stop_instance(context, instance, cast=do_cast) @wrap_check_policy @check_instance_lock @check_instance_state(vm_state=[vm_states.STOPPED]) def start(self, context, instance): """Start an instance.""" LOG.debug(_("Going to try to start instance"), instance=instance) instance = self.update(context, instance, task_state=task_states.STARTING, expected_task_state=None) # TODO(yamahata): injected_files isn't supported right now. # It is used only for osapi. 
not for ec2 api. # availability_zone isn't used by run_instance. self.compute_rpcapi.start_instance(context, instance) #NOTE(bcwaldon): no policy check here since it should be rolled in to # search_opts in get_all def get_active_by_window(self, context, begin, end=None, project_id=None): """Get instances that were continuously active over a window.""" return self.db.instance_get_active_by_window(context, begin, end, project_id) #NOTE(bcwaldon): this doesn't really belong in this class def get_instance_type(self, context, instance_type_id): """Get an instance type by instance type id.""" return instance_types.get_instance_type(instance_type_id) def get(self, context, instance_id): """Get a single instance with the given instance_id.""" # NOTE(ameade): we still need to support integer ids for ec2 if utils.is_uuid_like(instance_id): instance = self.db.instance_get_by_uuid(context, instance_id) else: instance = self.db.instance_get(context, instance_id) check_policy(context, 'get', instance) inst = dict(instance.iteritems()) # NOTE(comstud): Doesn't get returned with iteritems inst['name'] = instance['name'] return inst def get_all(self, context, search_opts=None, sort_key='created_at', sort_dir='desc', limit=None, marker=None): """Get all instances filtered by one of the given parameters. If there is no filter and the context is an admin, it will retrieve all instances in the system. Deleted instances will be returned by default, unless there is a search option that says otherwise. The results will be returned sorted in the order specified by the 'sort_dir' parameter using the key specified in the 'sort_key' parameter. """ #TODO(bcwaldon): determine the best argument for target here target = { 'project_id': context.project_id, 'user_id': context.user_id, } check_policy(context, "get_all", target) if search_opts is None: search_opts = {} LOG.debug(_("Searching by: %s") % str(search_opts)) # Fixups for the DB call filters = {} def _remap_flavor_filter(flavor_id): try: instance_type = instance_types.get_instance_type_by_flavor_id( flavor_id) except exception.FlavorNotFound: raise ValueError() filters['instance_type_id'] = instance_type['id'] def _remap_fixed_ip_filter(fixed_ip): # Turn fixed_ip into a regexp match. Since '.' matches # any character, we need to use regexp escaping for it. filters['ip'] = '^%s$' % fixed_ip.replace('.', '\\.') # search_option to filter_name mapping. filter_mapping = { 'image': 'image_ref', 'name': 'display_name', 'tenant_id': 'project_id', 'flavor': _remap_flavor_filter, 'fixed_ip': _remap_fixed_ip_filter} # copy from search_opts, doing various remappings as necessary for opt, value in search_opts.iteritems(): # Do remappings. # Values not in the filter_mapping table are copied as-is. # If remapping is None, option is not copied # If the remapping is a string, it is the filter_name to use try: remap_object = filter_mapping[opt] except KeyError: filters[opt] = value else: # Remaps are strings to translate to, or functions to call # to do the translating as defined by the table above. 
if isinstance(remap_object, basestring): filters[remap_object] = value else: try: remap_object(value) # We already know we can't match the filter, so # return an empty list except ValueError: return [] inst_models = self._get_instances_by_filters(context, filters, sort_key, sort_dir, limit=limit, marker=marker) # Convert the models to dictionaries instances = [] for inst_model in inst_models: instance = dict(inst_model.iteritems()) # NOTE(comstud): Doesn't get returned by iteritems instance['name'] = inst_model['name'] instances.append(instance) return instances def _get_instances_by_filters(self, context, filters, sort_key, sort_dir, limit=None, marker=None): if 'ip6' in filters or 'ip' in filters: res = self.network_api.get_instance_uuids_by_ip_filter(context, filters) # NOTE(jkoelker) It is possible that we will get the same # instance uuid twice (one for ipv4 and ipv6) uuids = set([r['instance_uuid'] for r in res]) filters['uuid'] = uuids return self.db.instance_get_all_by_filters(context, filters, sort_key, sort_dir, limit=limit, marker=marker) @wrap_check_policy @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED]) def backup(self, context, instance, name, backup_type, rotation, extra_properties=None): """Backup the given instance :param instance: nova.db.sqlalchemy.models.Instance :param name: name of the backup or snapshot name = backup_type # daily backups are called 'daily' :param rotation: int representing how many backups to keep around; None if rotation shouldn't be used (as in the case of snapshots) :param extra_properties: dict of extra image properties to include """ recv_meta = self._create_image(context, instance, name, 'backup', backup_type=backup_type, rotation=rotation, extra_properties=extra_properties) return recv_meta @wrap_check_policy @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED]) def snapshot(self, context, instance, name, extra_properties=None): """Snapshot the given instance. :param instance: nova.db.sqlalchemy.models.Instance :param name: name of the backup or snapshot :param extra_properties: dict of extra image properties to include :returns: A dict containing image metadata """ return self._create_image(context, instance, name, 'snapshot', extra_properties=extra_properties) def _create_image(self, context, instance, name, image_type, backup_type=None, rotation=None, extra_properties=None): """Create snapshot or backup for an instance on this host. 
:param context: security context :param instance: nova.db.sqlalchemy.models.Instance :param name: string for name of the snapshot :param image_type: snapshot | backup :param backup_type: daily | weekly :param rotation: int representing how many backups to keep around; None if rotation shouldn't be used (as in the case of snapshots) :param extra_properties: dict of extra image properties to include """ instance_uuid = instance['uuid'] if image_type == "snapshot": task_state = task_states.IMAGE_SNAPSHOT elif image_type == "backup": task_state = task_states.IMAGE_BACKUP else: raise Exception(_('Image type not recognized %s') % image_type) # change instance state and notify old_vm_state = instance["vm_state"] old_task_state = instance["task_state"] self.db.instance_test_and_set( context, instance_uuid, 'task_state', [None], task_state) notifications.send_update_with_states(context, instance, old_vm_state, instance["vm_state"], old_task_state, instance["task_state"], service="api", verify_states=True) properties = { 'instance_uuid': instance_uuid, 'user_id': str(context.user_id), 'image_type': image_type, } # Persist base image ref as a Glance image property system_meta = self.db.instance_system_metadata_get( context, instance_uuid) base_image_ref = system_meta.get('image_base_image_ref') if base_image_ref: properties['base_image_ref'] = base_image_ref sent_meta = {'name': name, 'is_public': False} if image_type == 'backup': properties['backup_type'] = backup_type elif image_type == 'snapshot': min_ram, min_disk = self._get_minram_mindisk_params(context, instance) if min_ram is not None: sent_meta['min_ram'] = min_ram if min_disk is not None: sent_meta['min_disk'] = min_disk properties.update(extra_properties or {}) sent_meta['properties'] = properties recv_meta = self.image_service.create(context, sent_meta) self.compute_rpcapi.snapshot_instance(context, instance=instance, image_id=recv_meta['id'], image_type=image_type, backup_type=backup_type, rotation=rotation) return recv_meta @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED]) def snapshot_volume_backed(self, context, instance, image_meta, name, extra_properties=None): """Snapshot the given volume-backed instance. :param instance: nova.db.sqlalchemy.models.Instance :param image_meta: metadata for the new image :param name: name of the backup or snapshot :param extra_properties: dict of extra image properties to include :returns: the new image metadata """ image_meta['name'] = name properties = image_meta['properties'] if instance['root_device_name']: properties['root_device_name'] = instance['root_device_name'] properties.update(extra_properties or {}) bdms = self.get_instance_bdms(context, instance) mapping = [] for bdm in bdms: if bdm.no_device: continue m = {} for attr in ('device_name', 'snapshot_id', 'volume_id', 'volume_size', 'delete_on_termination', 'no_device', 'virtual_name'): val = getattr(bdm, attr) if val is not None: m[attr] = val volume_id = m.get('volume_id') if volume_id: # create snapshot based on volume_id volume = self.volume_api.get(context, volume_id) # NOTE(yamahata): Should we wait for snapshot creation? # Linux LVM snapshot creation completes in # short time, it doesn't matter for now. 
name = _('snapshot for %s') % image_meta['name'] snapshot = self.volume_api.create_snapshot_force( context, volume, name, volume['display_description']) m['snapshot_id'] = snapshot['id'] del m['volume_id'] if m: mapping.append(m) for m in block_device.mappings_prepend_dev(properties.get('mappings', [])): virtual_name = m['virtual'] if virtual_name in ('ami', 'root'): continue assert block_device.is_swap_or_ephemeral(virtual_name) device_name = m['device'] if device_name in [b['device_name'] for b in mapping if not b.get('no_device', False)]: continue # NOTE(yamahata): swap and ephemeral devices are specified in # AMI, but disabled for this instance by user. # So disable those device by no_device. mapping.append({'device_name': device_name, 'no_device': True}) if mapping: properties['block_device_mapping'] = mapping for attr in ('status', 'location', 'id'): image_meta.pop(attr, None) # the new image is simply a bucket of properties (particularly the # block device mapping, kernel and ramdisk IDs) with no image data, # hence the zero size image_meta['size'] = 0 return self.image_service.create(context, image_meta, data='') def _get_minram_mindisk_params(self, context, instance): try: #try to get source image of the instance orig_image = self.image_service.show(context, instance['image_ref']) except exception.ImageNotFound: return None, None #disk format of vhd is non-shrinkable if orig_image.get('disk_format') == 'vhd': min_ram = instance['instance_type']['memory_mb'] min_disk = instance['instance_type']['root_gb'] else: #set new image values to the original image values min_ram = orig_image.get('min_ram') min_disk = orig_image.get('min_disk') return min_ram, min_disk @wrap_check_policy @check_instance_lock @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED, vm_states.RESCUED], task_state=[None, task_states.REBOOTING]) def reboot(self, context, instance, reboot_type): """Reboot the given instance.""" if (reboot_type == 'SOFT' and instance['task_state'] == task_states.REBOOTING): raise exception.InstanceInvalidState( attr='task_state', instance_uuid=instance['uuid'], state=instance['task_state']) state = {'SOFT': task_states.REBOOTING, 'HARD': task_states.REBOOTING_HARD}[reboot_type] instance = self.update(context, instance, vm_state=vm_states.ACTIVE, task_state=state, expected_task_state=[None, task_states.REBOOTING]) self.compute_rpcapi.reboot_instance(context, instance=instance, reboot_type=reboot_type) def _get_image(self, context, image_href): """Throws an ImageNotFound exception if image_href does not exist.""" (image_service, image_id) = glance.get_remote_image_service(context, image_href) return image_service.show(context, image_id) @wrap_check_policy @check_instance_lock @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED], task_state=[None]) def rebuild(self, context, instance, image_href, admin_password, **kwargs): """Rebuild the given instance with the provided attributes.""" orig_image_ref = instance['image_ref'] image = self._get_image(context, image_href) files_to_inject = kwargs.pop('files_to_inject', []) self._check_injected_file_quota(context, files_to_inject) metadata = kwargs.get('metadata', {}) self._check_metadata_properties_quota(context, metadata) instance_type = instance['instance_type'] if instance_type['memory_mb'] < int(image.get('min_ram') or 0): raise exception.InstanceTypeMemoryTooSmall() if instance_type['root_gb'] < int(image.get('min_disk') or 0): raise exception.InstanceTypeDiskTooSmall() (image_service, image_id) = 
glance.get_remote_image_service(context, image_href) image = image_service.show(context, image_id) kernel_id, ramdisk_id = self._handle_kernel_and_ramdisk( context, None, None, image, image_service) def _reset_image_metadata(): """ Remove old image properties that we're storing as instance system metadata. These properties start with 'image_'. Then add the properties for the new image. """ # FIXME(comstud): There's a race condition here in that # if the system_metadata for this instance is updated # after we do the get and before we update.. those other # updates will be lost. Since this problem exists in a lot # of other places, I think it should be addressed in a DB # layer overhaul. sys_metadata = self.db.instance_system_metadata_get(context, instance['uuid']) orig_sys_metadata = dict(sys_metadata) # Remove the old keys for key in sys_metadata.keys(): if key.startswith('image_'): del sys_metadata[key] # Add the new ones for key, value in image['properties'].iteritems(): new_value = str(value)[:255] sys_metadata['image_%s' % key] = new_value self.db.instance_system_metadata_update(context, instance['uuid'], sys_metadata, True) return orig_sys_metadata instance = self.update(context, instance, task_state=task_states.REBUILDING, expected_task_state=None, # Unfortunately we need to set image_ref early, # so API users can see it. image_ref=image_href, kernel_id=kernel_id or "", ramdisk_id=ramdisk_id or "", progress=0, **kwargs) # On a rebuild, since we're potentially changing images, we need to # wipe out the old image properties that we're storing as instance # system metadata... and copy in the properties for the new image. orig_sys_metadata = _reset_image_metadata() self.compute_rpcapi.rebuild_instance(context, instance=instance, new_pass=admin_password, injected_files=files_to_inject, image_ref=image_href, orig_image_ref=orig_image_ref, orig_sys_metadata=orig_sys_metadata) @wrap_check_policy @check_instance_lock @check_instance_state(vm_state=[vm_states.RESIZED]) def revert_resize(self, context, instance): """Reverts a resize, deleting the 'new' instance in the process.""" context = context.elevated() migration_ref = self.db.migration_get_by_instance_and_status(context, instance['uuid'], 'finished') if not migration_ref: raise exception.MigrationNotFoundByStatus( instance_id=instance['uuid'], status='finished') # reverse quota reservation for increased resource usage deltas = self._reverse_upsize_quota_delta(context, migration_ref) reservations = self._reserve_quota_delta(context, deltas) instance = self.update(context, instance, task_state=task_states.RESIZE_REVERTING, expected_task_state=None) self.compute_rpcapi.revert_resize(context, instance=instance, migration_id=migration_ref['id'], host=migration_ref['dest_compute'], reservations=reservations) self.db.migration_update(context, migration_ref['id'], {'status': 'reverted'}) @wrap_check_policy @check_instance_lock @check_instance_state(vm_state=[vm_states.RESIZED]) def confirm_resize(self, context, instance): """Confirms a migration/resize and deletes the 'old' instance.""" context = context.elevated() migration_ref = self.db.migration_get_by_instance_and_status(context, instance['uuid'], 'finished') if not migration_ref: raise exception.MigrationNotFoundByStatus( instance_id=instance['uuid'], status='finished') # reserve quota only for any decrease in resource usage deltas = self._downsize_quota_delta(context, migration_ref) reservations = self._reserve_quota_delta(context, deltas) instance = self.update(context, instance,
vm_state=vm_states.ACTIVE, task_state=None, expected_task_state=None) self.compute_rpcapi.confirm_resize(context, instance=instance, migration_id=migration_ref['id'], host=migration_ref['source_compute'], reservations=reservations) self.db.migration_update(context, migration_ref['id'], {'status': 'confirmed'}) @staticmethod def _resize_quota_delta(context, new_instance_type, old_instance_type, sense, compare): """ Calculate any quota adjustment required at a particular point in the resize cycle. :param context: the request context :param new_instance_type: the target instance type :param old_instance_type: the original instance type :param sense: the sense of the adjustment, 1 indicates a forward adjustment, whereas -1 indicates a reversal of a prior adjustment :param compare: the direction of the comparison, 1 indicates we're checking for positive deltas, whereas -1 indicates negative deltas """ def _quota_delta(resource): return sense * (new_instance_type[resource] - old_instance_type[resource]) deltas = {} if compare * _quota_delta('vcpus') > 0: deltas['cores'] = _quota_delta('vcpus') if compare * _quota_delta('memory_mb') > 0: deltas['ram'] = _quota_delta('memory_mb') return deltas @staticmethod def _upsize_quota_delta(context, new_instance_type, old_instance_type): """ Calculate deltas required to adjust quota for an instance upsize. """ return API._resize_quota_delta(context, new_instance_type, old_instance_type, 1, 1) @staticmethod def _reverse_upsize_quota_delta(context, migration_ref): """ Calculate deltas required to reverse a prior upsizing quota adjustment. """ old_instance_type = instance_types.get_instance_type( migration_ref['old_instance_type_id']) new_instance_type = instance_types.get_instance_type( migration_ref['new_instance_type_id']) return API._resize_quota_delta(context, new_instance_type, old_instance_type, -1, -1) @staticmethod def _downsize_quota_delta(context, migration_ref): """ Calculate deltas required to adjust quota for an instance downsize. """ old_instance_type = instance_types.get_instance_type( migration_ref['old_instance_type_id']) new_instance_type = instance_types.get_instance_type( migration_ref['new_instance_type_id']) return API._resize_quota_delta(context, new_instance_type, old_instance_type, 1, -1) @staticmethod def _reserve_quota_delta(context, deltas): return QUOTAS.reserve(context, **deltas) if deltas else None @wrap_check_policy @check_instance_lock @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED], task_state=[None]) def resize(self, context, instance, flavor_id=None, **kwargs): """Resize (ie, migrate) a running instance. If flavor_id is None, the process is considered a migration, keeping the original flavor_id. If flavor_id is not None, the instance should be migrated to a new host and resized to the new flavor_id. """ current_instance_type = instance['instance_type'] # If flavor_id is not provided, only migrate the instance. if not flavor_id: LOG.debug(_("flavor_id is None. 
Assuming migration."), instance=instance) new_instance_type = current_instance_type else: new_instance_type = instance_types.get_instance_type_by_flavor_id( flavor_id) current_instance_type_name = current_instance_type['name'] new_instance_type_name = new_instance_type['name'] LOG.debug(_("Old instance type %(current_instance_type_name)s, " " new instance type %(new_instance_type_name)s"), locals(), instance=instance) # FIXME(sirp): both of these should raise InstanceTypeNotFound instead if not new_instance_type: raise exception.FlavorNotFound(flavor_id=flavor_id) same_instance_type = (current_instance_type['id'] == new_instance_type['id']) # NOTE(sirp): We don't want to force a customer to change their flavor # when Ops is migrating off of a failed host. if new_instance_type['disabled'] and not same_instance_type: raise exception.FlavorNotFound(flavor_id=flavor_id) # NOTE(markwash): look up the image early to avoid auth problems later image = self.image_service.show(context, instance['image_ref']) if same_instance_type and flavor_id: raise exception.CannotResizeToSameFlavor() # ensure there is sufficient headroom for upsizes deltas = self._upsize_quota_delta(context, new_instance_type, current_instance_type) try: reservations = self._reserve_quota_delta(context, deltas) except exception.OverQuota as exc: quotas = exc.kwargs['quotas'] usages = exc.kwargs['usages'] overs = exc.kwargs['overs'] headroom = dict((res, quotas[res] - (usages[res]['in_use'] + usages[res]['reserved'])) for res in quotas.keys()) resource = overs[0] used = quotas[resource] - headroom[resource] total_allowed = used + headroom[resource] overs = ','.join(overs) pid = context.project_id LOG.warn(_("%(overs)s quota exceeded for %(pid)s," " tried to resize instance. %(msg)s"), locals()) raise exception.TooManyInstances(overs=overs, req=deltas[resource], used=used, allowed=total_allowed, resource=resource) instance = self.update(context, instance, task_state=task_states.RESIZE_PREP, expected_task_state=None, progress=0, **kwargs) request_spec = { 'instance_type': new_instance_type, 'instance_uuids': [instance['uuid']], 'instance_properties': instance} filter_properties = {'ignore_hosts': []} if not FLAGS.allow_resize_to_same_host: filter_properties['ignore_hosts'].append(instance['host']) args = { "instance": instance, "instance_type": new_instance_type, "image": image, "request_spec": jsonutils.to_primitive(request_spec), "filter_properties": filter_properties, "reservations": reservations, } self.scheduler_rpcapi.prep_resize(context, **args) @wrap_check_policy @check_instance_lock def add_fixed_ip(self, context, instance, network_id): """Add fixed_ip from specified network to given instance.""" self.compute_rpcapi.add_fixed_ip_to_instance(context, instance=instance, network_id=network_id) @wrap_check_policy @check_instance_lock def remove_fixed_ip(self, context, instance, address): """Remove fixed_ip from specified network to given instance.""" self.compute_rpcapi.remove_fixed_ip_from_instance(context, instance=instance, address=address) @wrap_check_policy @check_instance_lock @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED]) def pause(self, context, instance): """Pause the given instance.""" self.update(context, instance, vm_state=vm_states.ACTIVE, task_state=task_states.PAUSING, expected_task_state=None) self.compute_rpcapi.pause_instance(context, instance=instance) @wrap_check_policy @check_instance_lock @check_instance_state(vm_state=[vm_states.PAUSED]) def unpause(self, context, instance): 
"""Unpause the given instance.""" self.update(context, instance, vm_state=vm_states.PAUSED, task_state=task_states.UNPAUSING, expected_task_state=None) self.compute_rpcapi.unpause_instance(context, instance=instance) @wrap_check_policy def get_diagnostics(self, context, instance): """Retrieve diagnostics for the given instance.""" return self.compute_rpcapi.get_diagnostics(context, instance=instance) @wrap_check_policy @check_instance_lock @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED]) def suspend(self, context, instance): """Suspend the given instance.""" self.update(context, instance, vm_state=vm_states.ACTIVE, task_state=task_states.SUSPENDING, expected_task_state=None) self.compute_rpcapi.suspend_instance(context, instance=instance) @wrap_check_policy @check_instance_lock @check_instance_state(vm_state=[vm_states.SUSPENDED]) def resume(self, context, instance): """Resume the given instance.""" self.update(context, instance, vm_state=vm_states.SUSPENDED, task_state=task_states.RESUMING, expected_task_state=None) self.compute_rpcapi.resume_instance(context, instance=instance) @wrap_check_policy @check_instance_lock @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED]) def rescue(self, context, instance, rescue_password=None): """Rescue the given instance.""" self.update(context, instance, vm_state=vm_states.ACTIVE, task_state=task_states.RESCUING, expected_task_state=None) self.compute_rpcapi.rescue_instance(context, instance=instance, rescue_password=rescue_password) @wrap_check_policy @check_instance_lock @check_instance_state(vm_state=[vm_states.RESCUED]) def unrescue(self, context, instance): """Unrescue the given instance.""" self.update(context, instance, vm_state=vm_states.RESCUED, task_state=task_states.UNRESCUING, expected_task_state=None) self.compute_rpcapi.unrescue_instance(context, instance=instance) @wrap_check_policy @check_instance_lock @check_instance_state(vm_state=[vm_states.ACTIVE]) def set_admin_password(self, context, instance, password=None): """Set the root/admin password for the given instance.""" self.update(context, instance, task_state=task_states.UPDATING_PASSWORD, expected_task_state=None) self.compute_rpcapi.set_admin_password(context, instance=instance, new_pass=password) @wrap_check_policy @check_instance_lock def inject_file(self, context, instance, path, file_contents): """Write a file to the given instance.""" self.compute_rpcapi.inject_file(context, instance=instance, path=path, file_contents=file_contents) @wrap_check_policy def get_vnc_console(self, context, instance, console_type): """Get a url to an instance Console.""" if not instance['host']: raise exception.InstanceNotReady(instance=instance) connect_info = self.compute_rpcapi.get_vnc_console(context, instance=instance, console_type=console_type) self.consoleauth_rpcapi.authorize_console(context, connect_info['token'], console_type, connect_info['host'], connect_info['port'], connect_info['internal_access_path']) return {'url': connect_info['access_url']} @wrap_check_policy def get_console_output(self, context, instance, tail_length=None): """Get console output for an instance.""" return self.compute_rpcapi.get_console_output(context, instance=instance, tail_length=tail_length) @wrap_check_policy def lock(self, context, instance): """Lock the given instance.""" context = context.elevated() instance_uuid = instance['uuid'] LOG.debug(_('Locking'), context=context, instance_uuid=instance_uuid) self._instance_update(context, instance_uuid, locked=True) 
@wrap_check_policy def unlock(self, context, instance): """Unlock the given instance.""" context = context.elevated() instance_uuid = instance['uuid'] LOG.debug(_('Unlocking'), context=context, instance_uuid=instance_uuid) self._instance_update(context, instance_uuid, locked=False) @wrap_check_policy def get_lock(self, context, instance): """Return the boolean state of given instance's lock.""" return self.get(context, instance['uuid'])['locked'] @wrap_check_policy @check_instance_lock def reset_network(self, context, instance): """Reset networking on the instance.""" self.compute_rpcapi.reset_network(context, instance=instance) @wrap_check_policy @check_instance_lock def inject_network_info(self, context, instance): """Inject network info for the instance.""" self.compute_rpcapi.inject_network_info(context, instance=instance) @wrap_check_policy @check_instance_lock def attach_volume(self, context, instance, volume_id, device=None): """Attach an existing volume to an existing instance.""" # NOTE(vish): Fail fast if the device is not going to pass. This # will need to be removed along with the test if we # change the logic in the manager for what constitutes # a valid device. if device and not block_device.match_device(device): raise exception.InvalidDevicePath(path=device) # NOTE(vish): This is done on the compute host because we want # to avoid a race where two devices are requested at # the same time. When db access is removed from # compute, the bdm will be created here and we will # have to make sure that they are assigned atomically. device = self.compute_rpcapi.reserve_block_device_name( context, device=device, instance=instance) try: volume = self.volume_api.get(context, volume_id) self.volume_api.check_attach(context, volume) self.volume_api.reserve_volume(context, volume) self.compute_rpcapi.attach_volume(context, instance=instance, volume_id=volume_id, mountpoint=device) except Exception: with excutils.save_and_reraise_exception(): self.db.block_device_mapping_destroy_by_instance_and_device( context, instance['uuid'], device) return device @check_instance_lock def _detach_volume(self, context, instance, volume_id): check_policy(context, 'detach_volume', instance) volume = self.volume_api.get(context, volume_id) self.volume_api.check_detach(context, volume) self.volume_api.begin_detaching(context, volume) self.compute_rpcapi.detach_volume(context, instance=instance, volume_id=volume_id) return instance # FIXME(comstud): I wonder if API should pull in the instance from # the volume ID via volume API and pass it and the volume object here def detach_volume(self, context, volume_id): """Detach a volume from an instance.""" volume = self.volume_api.get(context, volume_id) if volume['attach_status'] == 'detached': msg = _("Volume must be attached in order to detach.") raise exception.InvalidVolume(reason=msg) instance_uuid = volume['instance_uuid'] instance = self.db.instance_get_by_uuid(context.elevated(), instance_uuid) if not instance: raise exception.VolumeUnattached(volume_id=volume_id) self._detach_volume(context, instance, volume_id) @wrap_check_policy def get_instance_metadata(self, context, instance): """Get all metadata associated with an instance.""" rv = self.db.instance_metadata_get(context, instance['uuid']) return dict(rv.iteritems()) @wrap_check_policy @check_instance_lock def delete_instance_metadata(self, context, instance, key): """Delete the given metadata item from an instance.""" self.db.instance_metadata_delete(context, instance['uuid'], key) instance['metadata'] 
= {} notifications.send_update(context, instance, instance) self.compute_rpcapi.change_instance_metadata(context, instance=instance, diff={key: ['-']}) @wrap_check_policy @check_instance_lock def update_instance_metadata(self, context, instance, metadata, delete=False): """Updates or creates instance metadata. If delete is True, metadata items that are not specified in the `metadata` argument will be deleted. """ orig = self.get_instance_metadata(context, instance) if delete: _metadata = metadata else: _metadata = orig.copy() _metadata.update(metadata) self._check_metadata_properties_quota(context, _metadata) metadata = self.db.instance_metadata_update(context, instance['uuid'], _metadata, True) instance['metadata'] = metadata notifications.send_update(context, instance, instance) diff = utils.diff_dict(orig, _metadata) self.compute_rpcapi.change_instance_metadata(context, instance=instance, diff=diff) return _metadata def get_instance_faults(self, context, instances): """Get all faults for a list of instance uuids.""" if not instances: return {} for instance in instances: check_policy(context, 'get_instance_faults', instance) uuids = [instance['uuid'] for instance in instances] return self.db.instance_fault_get_by_instance_uuids(context, uuids) def get_instance_bdms(self, context, instance): """Get all bdm tables for specified instance.""" return self.db.block_device_mapping_get_all_by_instance(context, instance['uuid']) def is_volume_backed_instance(self, context, instance, bdms): bdms = bdms or self.get_instance_bdms(context, instance) for bdm in bdms: if (block_device.strip_dev(bdm.device_name) == block_device.strip_dev(instance['root_device_name'])): return True else: return False @check_instance_state(vm_state=[vm_states.ACTIVE]) def live_migrate(self, context, instance, block_migration, disk_over_commit, host): """Live migrate a server to a new host.""" LOG.debug(_("Going to try to live migrate instance to %s"), host, instance=instance) instance = self.update(context, instance, task_state=task_states.MIGRATING, expected_task_state=None) self.scheduler_rpcapi.live_migration(context, block_migration, disk_over_commit, instance, host) class HostAPI(base.Base): """Sub-set of the Compute Manager API for managing host operations.""" def __init__(self): self.compute_rpcapi = compute_rpcapi.ComputeAPI() super(HostAPI, self).__init__() def set_host_enabled(self, context, host, enabled): """Sets the specified host's ability to accept new instances.""" # NOTE(comstud): No instance_uuid argument to this compute manager # call return self.compute_rpcapi.set_host_enabled(context, enabled=enabled, host=host) def get_host_uptime(self, context, host): """Returns the result of calling "uptime" on the target host.""" # NOTE(comstud): No instance_uuid argument to this compute manager # call return self.compute_rpcapi.get_host_uptime(context, host=host) def host_power_action(self, context, host, action): """Reboots, shuts down or powers up the host.""" # NOTE(comstud): No instance_uuid argument to this compute manager # call return self.compute_rpcapi.host_power_action(context, action=action, host=host) def set_host_maintenance(self, context, host, mode): """Start/Stop host maintenance window.
On start, it triggers guest VMs evacuation.""" return self.compute_rpcapi.host_maintenance_mode(context, host_param=host, mode=mode, host=host) class AggregateAPI(base.Base): """Sub-set of the Compute Manager API for managing host aggregates.""" def __init__(self, **kwargs): self.compute_rpcapi = compute_rpcapi.ComputeAPI() super(AggregateAPI, self).__init__(**kwargs) def create_aggregate(self, context, aggregate_name, availability_zone): """Creates the model for the aggregate.""" zones = [s.availability_zone for s in self.db.service_get_all_by_topic(context, FLAGS.compute_topic)] if availability_zone in zones: values = {"name": aggregate_name, "availability_zone": availability_zone} aggregate = self.db.aggregate_create(context, values) return dict(aggregate.iteritems()) else: raise exception.InvalidAggregateAction(action='create_aggregate', aggregate_id="'N/A'", reason='invalid zone') def get_aggregate(self, context, aggregate_id): """Get an aggregate by id.""" aggregate = self.db.aggregate_get(context, aggregate_id) return self._get_aggregate_info(context, aggregate) def get_aggregate_list(self, context): """Get all the aggregates.""" aggregates = self.db.aggregate_get_all(context) return [self._get_aggregate_info(context, a) for a in aggregates] def update_aggregate(self, context, aggregate_id, values): """Update the properties of an aggregate.""" aggregate = self.db.aggregate_update(context, aggregate_id, values) return self._get_aggregate_info(context, aggregate) def update_aggregate_metadata(self, context, aggregate_id, metadata): """Updates the aggregate metadata. If a key is set to None, it gets removed from the aggregate metadata. """ for key in metadata.keys(): if not metadata[key]: try: self.db.aggregate_metadata_delete(context, aggregate_id, key) metadata.pop(key) except exception.AggregateMetadataNotFound, e: LOG.warn(e.message) self.db.aggregate_metadata_add(context, aggregate_id, metadata) return self.get_aggregate(context, aggregate_id) def delete_aggregate(self, context, aggregate_id): """Deletes the aggregate.""" hosts = self.db.aggregate_host_get_all(context, aggregate_id) if len(hosts) > 0: raise exception.InvalidAggregateAction(action='delete', aggregate_id=aggregate_id, reason='not empty') self.db.aggregate_delete(context, aggregate_id) def add_host_to_aggregate(self, context, aggregate_id, host): """Adds the host to an aggregate.""" # validates the host; ComputeHostNotFound is raised if invalid service = self.db.service_get_all_compute_by_host(context, host)[0] aggregate = self.db.aggregate_get(context, aggregate_id) if service.availability_zone != aggregate.availability_zone: raise exception.InvalidAggregateAction( action='add host', aggregate_id=aggregate_id, reason='availability zone mismatch') self.db.aggregate_host_add(context, aggregate_id, host) #NOTE(jogo): Send message to host to support resource pools self.compute_rpcapi.add_aggregate_host(context, aggregate_id=aggregate_id, host_param=host, host=host) return self.get_aggregate(context, aggregate_id) def remove_host_from_aggregate(self, context, aggregate_id, host): """Removes host from the aggregate.""" # validates the host; ComputeHostNotFound is raised if invalid service = self.db.service_get_all_compute_by_host(context, host)[0] self.db.aggregate_host_delete(context, aggregate_id, host) self.compute_rpcapi.remove_aggregate_host(context, aggregate_id=aggregate_id, host_param=host, host=host) return self.get_aggregate(context, aggregate_id) def _get_aggregate_info(self, context, aggregate): """Builds a 
dictionary with aggregate props, metadata and hosts.""" metadata = self.db.aggregate_metadata_get(context, aggregate.id) hosts = self.db.aggregate_host_get_all(context, aggregate.id) result = dict(aggregate.iteritems()) result["metadata"] = metadata result["hosts"] = hosts return result class KeypairAPI(base.Base): """Sub-set of the Compute Manager API for managing key pairs.""" def __init__(self, **kwargs): super(KeypairAPI, self).__init__(**kwargs) def _validate_keypair_name(self, context, user_id, key_name): safechars = "_- " + string.digits + string.ascii_letters clean_value = "".join(x for x in key_name if x in safechars) if clean_value != key_name: msg = _("Keypair name contains unsafe characters") raise exception.InvalidKeypair(explanation=msg) if not 0 < len(key_name) < 256: msg = _('Keypair name must be between 1 and 255 characters long') raise exception.InvalidKeypair(explanation=msg) # NOTE: check for existing keypairs of same name try: self.db.key_pair_get(context, user_id, key_name) raise exception.KeyPairExists(key_name=key_name) except exception.NotFound: pass def import_key_pair(self, context, user_id, key_name, public_key): """Import a key pair using an existing public key.""" self._validate_keypair_name(context, user_id, key_name) count = QUOTAS.count(context, 'key_pairs', user_id) try: QUOTAS.limit_check(context, key_pairs=count + 1) except exception.OverQuota: raise exception.KeypairLimitExceeded() try: fingerprint = crypto.generate_fingerprint(public_key) except exception.InvalidKeypair: msg = _("Keypair data is invalid") raise exception.InvalidKeypair(explanation=msg) keypair = {'user_id': user_id, 'name': key_name, 'fingerprint': fingerprint, 'public_key': public_key} self.db.key_pair_create(context, keypair) return keypair def create_key_pair(self, context, user_id, key_name): """Create a new key pair.""" self._validate_keypair_name(context, user_id, key_name) count = QUOTAS.count(context, 'key_pairs', user_id) try: QUOTAS.limit_check(context, key_pairs=count + 1) except exception.OverQuota: raise exception.KeypairLimitExceeded() private_key, public_key, fingerprint = crypto.generate_key_pair() keypair = {'user_id': user_id, 'name': key_name, 'fingerprint': fingerprint, 'public_key': public_key, 'private_key': private_key} self.db.key_pair_create(context, keypair) return keypair def delete_key_pair(self, context, user_id, key_name): """Delete a keypair by name.""" self.db.key_pair_destroy(context, user_id, key_name) def get_key_pairs(self, context, user_id): """List key pairs.""" key_pairs = self.db.key_pair_get_all_by_user(context, user_id) rval = [] for key_pair in key_pairs: rval.append({ 'name': key_pair['name'], 'public_key': key_pair['public_key'], 'fingerprint': key_pair['fingerprint'], }) return rval def get_key_pair(self, context, user_id, key_name): """Get a keypair by name.""" key_pair = self.db.key_pair_get(context, user_id, key_name) return {'name': key_pair['name'], 'public_key': key_pair['public_key'], 'fingerprint': key_pair['fingerprint']} class SecurityGroupAPI(base.Base): """ Sub-set of the Compute API related to managing security groups and security group rules """ def __init__(self, **kwargs): super(SecurityGroupAPI, self).__init__(**kwargs) self.security_group_rpcapi = compute_rpcapi.SecurityGroupAPI() self.sgh = importutils.import_object(FLAGS.security_group_handler) def validate_property(self, value, property, allowed): """ Validate given security group property. 
:param value: the value to validate, as a string or unicode :param property: the property, either 'name' or 'description' :param allowed: the range of characters allowed """ try: val = value.strip() except AttributeError: msg = _("Security group %s is not a string or unicode") % property self.raise_invalid_property(msg) if not val: msg = _("Security group %s cannot be empty.") % property self.raise_invalid_property(msg) if allowed and not re.match(allowed, val): # Some validation to ensure that values match API spec. # - Alphanumeric characters, spaces, dashes, and underscores. # TODO(Daviey): LP: #813685 extend beyond group_name checking, and # probably create a param validator that can be used elsewhere. msg = (_("Value (%(value)s) for parameter Group%(property)s is " "invalid. Content limited to '%(allowed)s'.") % dict(value=value, allowed=allowed, property=property.capitalize())) self.raise_invalid_property(msg) if len(val) > 255: msg = _("Security group %s should not be greater " "than 255 characters.") % property self.raise_invalid_property(msg) def ensure_default(self, context): """Ensure that a context has a security group. Creates a security group for the security context if it does not already exist. :param context: the security context """ existed, group = self.db.security_group_ensure_default(context) if not existed: self.sgh.trigger_security_group_create_refresh(context, group) def create(self, context, name, description): try: reservations = QUOTAS.reserve(context, security_groups=1) except exception.OverQuota: msg = _("Quota exceeded, too many security groups.") self.raise_over_quota(msg) LOG.audit(_("Create Security Group %s"), name, context=context) try: self.ensure_default(context) if self.db.security_group_exists(context, context.project_id, name): msg = _('Security group %s already exists') % name self.raise_group_already_exists(msg) group = {'user_id': context.user_id, 'project_id': context.project_id, 'name': name, 'description': description} group_ref = self.db.security_group_create(context, group) self.sgh.trigger_security_group_create_refresh(context, group) # Commit the reservation QUOTAS.commit(context, reservations) except Exception: with excutils.save_and_reraise_exception(): QUOTAS.rollback(context, reservations) return group_ref def get(self, context, name=None, id=None, map_exception=False): self.ensure_default(context) try: if name: return self.db.security_group_get_by_name(context, context.project_id, name) elif id: return self.db.security_group_get(context, id) except exception.NotFound as exp: if map_exception: msg = unicode(exp) self.raise_not_found(msg) else: raise def list(self, context, names=None, ids=None, project=None, search_opts=None): self.ensure_default(context) groups = [] if names or ids: if names: for name in names: groups.append(self.db.security_group_get_by_name(context, project, name)) if ids: for id in ids: groups.append(self.db.security_group_get(context, id)) elif context.is_admin: # TODO(eglynn): support a wider set of search options than just # all_tenants, at least include the standard filters defined for # the EC2 DescribeSecurityGroups API for the non-admin case also if (search_opts and 'all_tenants' in search_opts): groups = self.db.security_group_get_all(context) else: groups = self.db.security_group_get_by_project(context, project) elif project: groups = self.db.security_group_get_by_project(context, project) return groups def destroy(self, context, security_group): if self.db.security_group_in_use(context, security_group.id):
msg = _("Security group is still in use") self.raise_invalid_group(msg) # Get reservations try: reservations = QUOTAS.reserve(context, security_groups=-1) except Exception: reservations = None LOG.exception(_("Failed to update usages deallocating " "security group")) LOG.audit(_("Delete security group %s"), security_group.name, context=context) self.db.security_group_destroy(context, security_group.id) self.sgh.trigger_security_group_destroy_refresh(context, security_group.id) # Commit the reservations if reservations: QUOTAS.commit(context, reservations) def is_associated_with_server(self, security_group, instance_uuid): """Check if the security group is already associated with the instance. If Yes, return True. """ if not security_group: return False instances = security_group.get('instances') if not instances: return False for inst in instances: if (instance_uuid == inst['uuid']): return True return False @wrap_check_security_groups_policy def add_to_instance(self, context, instance, security_group_name): """Add security group to the instance""" security_group = self.db.security_group_get_by_name(context, context.project_id, security_group_name) instance_uuid = instance['uuid'] #check if the security group is associated with the server if self.is_associated_with_server(security_group, instance_uuid): raise exception.SecurityGroupExistsForInstance( security_group_id=security_group['id'], instance_id=instance_uuid) #check if the instance is in running state if instance['power_state'] != power_state.RUNNING: raise exception.InstanceNotRunning(instance_id=instance_uuid) self.db.instance_add_security_group(context.elevated(), instance_uuid, security_group['id']) # NOTE(comstud): No instance_uuid argument to this compute manager # call self.security_group_rpcapi.refresh_security_group_rules(context, security_group['id'], host=instance['host']) self.trigger_handler('instance_add_security_group', context, instance, security_group_name) @wrap_check_security_groups_policy def remove_from_instance(self, context, instance, security_group_name): """Remove the security group associated with the instance""" security_group = self.db.security_group_get_by_name(context, context.project_id, security_group_name) instance_uuid = instance['uuid'] #check if the security group is associated with the server if not self.is_associated_with_server(security_group, instance_uuid): raise exception.SecurityGroupNotExistsForInstance( security_group_id=security_group['id'], instance_id=instance_uuid) #check if the instance is in running state if instance['power_state'] != power_state.RUNNING: raise exception.InstanceNotRunning(instance_id=instance_uuid) self.db.instance_remove_security_group(context.elevated(), instance_uuid, security_group['id']) # NOTE(comstud): No instance_uuid argument to this compute manager # call self.security_group_rpcapi.refresh_security_group_rules(context, security_group['id'], host=instance['host']) self.trigger_handler('instance_remove_security_group', context, instance, security_group_name) def trigger_handler(self, event, *args): handle = getattr(self.sgh, 'trigger_%s_refresh' % event) handle(*args) def trigger_rules_refresh(self, context, id): """Called when a rule is added to or removed from a security_group.""" security_group = self.db.security_group_get(context, id) for instance in security_group['instances']: if instance['host'] is not None: self.security_group_rpcapi.refresh_instance_security_rules( context, instance['host'], instance) def trigger_members_refresh(self, context, 
group_ids): """Called when a security group gains a new or loses a member. Sends an update request to each compute node for each instance for which this is relevant. """ # First, we get the security group rules that reference these groups as # the grantee.. security_group_rules = set() for group_id in group_ids: security_group_rules.update( self.db.security_group_rule_get_by_security_group_grantee( context, group_id)) # ..then we distill the rules into the groups to which they belong.. security_groups = set() for rule in security_group_rules: security_group = self.db.security_group_get( context, rule['parent_group_id']) security_groups.add(security_group) # ..then we find the instances that are members of these groups.. instances = set() for security_group in security_groups: for instance in security_group['instances']: instances.add(instance) # ..then we send a request to refresh the rules for each instance. for instance in instances: if instance['host']: self.security_group_rpcapi.refresh_instance_security_rules( context, instance['host'], instance) def parse_cidr(self, cidr): if cidr: try: cidr = urllib.unquote(cidr).decode() except Exception as e: self.raise_invalid_cidr(cidr, e) if not utils.is_valid_cidr(cidr): self.raise_invalid_cidr(cidr) return cidr else: return '0.0.0.0/0' @staticmethod def new_group_ingress_rule(grantee_group_id, protocol, from_port, to_port): return SecurityGroupAPI._new_ingress_rule(protocol, from_port, to_port, group_id=grantee_group_id) @staticmethod def new_cidr_ingress_rule(grantee_cidr, protocol, from_port, to_port): return SecurityGroupAPI._new_ingress_rule(protocol, from_port, to_port, cidr=grantee_cidr) @staticmethod def _new_ingress_rule(ip_protocol, from_port, to_port, group_id=None, cidr=None): values = {} if group_id: values['group_id'] = group_id # Open everything if an explicit port range or type/code are not # specified, but only if a source group was specified. 
ip_proto_upper = ip_protocol.upper() if ip_protocol else '' if (ip_proto_upper == 'ICMP' and from_port is None and to_port is None): from_port = -1 to_port = -1 elif (ip_proto_upper in ['TCP', 'UDP'] and from_port is None and to_port is None): from_port = 1 to_port = 65535 elif cidr: values['cidr'] = cidr if ip_protocol and from_port is not None and to_port is not None: ip_protocol = str(ip_protocol) try: # Verify integer conversions from_port = int(from_port) to_port = int(to_port) except ValueError: if ip_protocol.upper() == 'ICMP': raise exception.InvalidInput(reason="Type and" " Code must be integers for ICMP protocol type") else: raise exception.InvalidInput(reason="To and From ports " "must be integers") if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']: raise exception.InvalidIpProtocol(protocol=ip_protocol) # Verify that from_port must always be less than # or equal to to_port if (ip_protocol.upper() in ['TCP', 'UDP'] and (from_port > to_port)): raise exception.InvalidPortRange(from_port=from_port, to_port=to_port, msg="Former value cannot" " be greater than the later") # Verify valid TCP, UDP port ranges if (ip_protocol.upper() in ['TCP', 'UDP'] and (from_port < 1 or to_port > 65535)): raise exception.InvalidPortRange(from_port=from_port, to_port=to_port, msg="Valid TCP ports should" " be between 1-65535") # Verify ICMP type and code if (ip_protocol.upper() == "ICMP" and (from_port < -1 or from_port > 255 or to_port < -1 or to_port > 255)): raise exception.InvalidPortRange(from_port=from_port, to_port=to_port, msg="For ICMP, the" " type:code must be valid") values['protocol'] = ip_protocol values['from_port'] = from_port values['to_port'] = to_port else: # If cidr based filtering, protocol and ports are mandatory if cidr: return None return values def rule_exists(self, security_group, values): """Indicates whether the specified rule values are already defined in the given security group. 
""" for rule in security_group.rules: is_duplicate = True keys = ('group_id', 'cidr', 'from_port', 'to_port', 'protocol') for key in keys: if rule.get(key) != values.get(key): is_duplicate = False break if is_duplicate: return rule.get('id') or True return False def get_rule(self, context, id): self.ensure_default(context) try: return self.db.security_group_rule_get(context, id) except exception.NotFound: msg = _("Rule (%s) not found") % id self.raise_not_found(msg) def add_rules(self, context, id, name, vals): count = QUOTAS.count(context, 'security_group_rules', id) try: projected = count + len(vals) QUOTAS.limit_check(context, security_group_rules=projected) except exception.OverQuota: msg = _("Quota exceeded, too many security group rules.") self.raise_over_quota(msg) msg = _("Authorize security group ingress %s") LOG.audit(msg, name, context=context) rules = [self.db.security_group_rule_create(context, v) for v in vals] self.trigger_rules_refresh(context, id=id) self.trigger_handler('security_group_rule_create', context, [r['id'] for r in rules]) return rules def remove_rules(self, context, security_group, rule_ids): msg = _("Revoke security group ingress %s") LOG.audit(msg, security_group['name'], context=context) for rule_id in rule_ids: self.db.security_group_rule_destroy(context, rule_id) # NOTE(vish): we removed some rules, so refresh self.trigger_rules_refresh(context, id=security_group['id']) self.trigger_handler('security_group_rule_destroy', context, rule_ids) @staticmethod def raise_invalid_property(msg): raise NotImplementedError() @staticmethod def raise_group_already_exists(msg): raise NotImplementedError() @staticmethod def raise_invalid_group(msg): raise NotImplementedError() @staticmethod def raise_invalid_cidr(cidr, decoding_exception=None): raise NotImplementedError() @staticmethod def raise_over_quota(msg): raise NotImplementedError() @staticmethod def raise_not_found(msg): raise NotImplementedError()
./CrossVul/dataset_final_sorted/CWE-264/py/good_5539_0
crossvul-python_data_bad_3632_4
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Quotas for instances, volumes, and floating ips.""" from nova import db from nova.openstack.common import cfg from nova import flags quota_opts = [ cfg.IntOpt('quota_instances', default=10, help='number of instances allowed per project'), cfg.IntOpt('quota_cores', default=20, help='number of instance cores allowed per project'), cfg.IntOpt('quota_ram', default=50 * 1024, help='megabytes of instance ram allowed per project'), cfg.IntOpt('quota_volumes', default=10, help='number of volumes allowed per project'), cfg.IntOpt('quota_gigabytes', default=1000, help='number of volume gigabytes allowed per project'), cfg.IntOpt('quota_floating_ips', default=10, help='number of floating ips allowed per project'), cfg.IntOpt('quota_metadata_items', default=128, help='number of metadata items allowed per instance'), cfg.IntOpt('quota_injected_files', default=5, help='number of injected files allowed'), cfg.IntOpt('quota_injected_file_content_bytes', default=10 * 1024, help='number of bytes allowed per injected file'), cfg.IntOpt('quota_injected_file_path_bytes', default=255, help='number of bytes allowed per injected file path'), ] FLAGS = flags.FLAGS FLAGS.register_opts(quota_opts) quota_resources = ['metadata_items', 'injected_file_content_bytes', 'volumes', 'gigabytes', 'ram', 'floating_ips', 'instances', 'injected_files', 'cores'] def _get_default_quotas(): defaults = { 'instances': FLAGS.quota_instances, 'cores': FLAGS.quota_cores, 'ram': FLAGS.quota_ram, 'volumes': FLAGS.quota_volumes, 'gigabytes': FLAGS.quota_gigabytes, 'floating_ips': FLAGS.quota_floating_ips, 'metadata_items': FLAGS.quota_metadata_items, 'injected_files': FLAGS.quota_injected_files, 'injected_file_content_bytes': FLAGS.quota_injected_file_content_bytes, } # -1 in the quota flags means unlimited return defaults def get_class_quotas(context, quota_class, defaults=None): """Update defaults with the quota class values.""" if not defaults: defaults = _get_default_quotas() quota = db.quota_class_get_all_by_name(context, quota_class) for key in defaults.keys(): if key in quota: defaults[key] = quota[key] return defaults def get_project_quotas(context, project_id): defaults = _get_default_quotas() if context.quota_class: get_class_quotas(context, context.quota_class, defaults) quota = db.quota_get_all_by_project(context, project_id) for key in defaults.keys(): if key in quota: defaults[key] = quota[key] return defaults def _get_request_allotment(requested, used, quota): if quota == -1: return requested return quota - used def allowed_instances(context, requested_instances, instance_type): """Check quota and return min(requested_instances, allowed_instances).""" project_id = context.project_id context = context.elevated() requested_cores = requested_instances * instance_type['vcpus'] requested_ram = 
requested_instances * instance_type['memory_mb'] usage = db.instance_data_get_for_project(context, project_id) used_instances, used_cores, used_ram = usage quota = get_project_quotas(context, project_id) allowed_instances = _get_request_allotment(requested_instances, used_instances, quota['instances']) allowed_cores = _get_request_allotment(requested_cores, used_cores, quota['cores']) allowed_ram = _get_request_allotment(requested_ram, used_ram, quota['ram']) if instance_type['vcpus']: allowed_instances = min(allowed_instances, allowed_cores // instance_type['vcpus']) if instance_type['memory_mb']: allowed_instances = min(allowed_instances, allowed_ram // instance_type['memory_mb']) return min(requested_instances, allowed_instances) def allowed_volumes(context, requested_volumes, size): """Check quota and return min(requested_volumes, allowed_volumes).""" project_id = context.project_id context = context.elevated() size = int(size) requested_gigabytes = requested_volumes * size used_volumes, used_gigabytes = db.volume_data_get_for_project(context, project_id) quota = get_project_quotas(context, project_id) allowed_volumes = _get_request_allotment(requested_volumes, used_volumes, quota['volumes']) allowed_gigabytes = _get_request_allotment(requested_gigabytes, used_gigabytes, quota['gigabytes']) if size != 0: allowed_volumes = min(allowed_volumes, int(allowed_gigabytes // size)) return min(requested_volumes, allowed_volumes) def allowed_floating_ips(context, requested_floating_ips): """Check quota and return min(requested, allowed) floating ips.""" project_id = context.project_id context = context.elevated() used_floating_ips = db.floating_ip_count_by_project(context, project_id) quota = get_project_quotas(context, project_id) allowed_floating_ips = _get_request_allotment(requested_floating_ips, used_floating_ips, quota['floating_ips']) return min(requested_floating_ips, allowed_floating_ips) def _calculate_simple_quota(context, resource, requested): """Check quota for resource; return min(requested, allowed).""" quota = get_project_quotas(context, context.project_id) allowed = _get_request_allotment(requested, 0, quota[resource]) return min(requested, allowed) def allowed_metadata_items(context, requested_metadata_items): """Return the number of metadata items allowed.""" return _calculate_simple_quota(context, 'metadata_items', requested_metadata_items) def allowed_injected_files(context, requested_injected_files): """Return the number of injected files allowed.""" return _calculate_simple_quota(context, 'injected_files', requested_injected_files) def allowed_injected_file_content_bytes(context, requested_bytes): """Return the number of bytes allowed per injected file content.""" resource = 'injected_file_content_bytes' return _calculate_simple_quota(context, resource, requested_bytes) def allowed_injected_file_path_bytes(context): """Return the number of bytes allowed in an injected file path.""" return FLAGS.quota_injected_file_path_bytes
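# Worked example (standalone; the usage and quota numbers are hypothetical) of
# the headroom arithmetic behind allowed_instances() above: each resource
# yields an allotment via _get_request_allotment(), the instance count is
# capped by the most constrained resource, and the result is clamped to the
# original request.
if __name__ == '__main__':
    requested = 5
    vcpus, ram_mb = 2, 2048  # per-instance footprint of the flavor
    by_instances = _get_request_allotment(requested, 6, 10)  # 10 - 6 = 4
    by_cores = _get_request_allotment(requested * vcpus, 12, 20) // vcpus  # 8 cores left -> 4
    by_ram = _get_request_allotment(requested * ram_mb, 45056, 51200) // ram_mb  # 6144 MB left -> 3
    # RAM is the most constrained resource, so only 3 instances are allowed
    assert min(requested, by_instances, by_cores, by_ram) == 3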
./CrossVul/dataset_final_sorted/CWE-264/py/bad_3632_4
crossvul-python_data_good_3693_2
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import datetime from keystone.common import sql from keystone import exception from keystone import token class TokenModel(sql.ModelBase, sql.DictBase): __tablename__ = 'token' id = sql.Column(sql.String(64), primary_key=True) expires = sql.Column(sql.DateTime(), default=None) extra = sql.Column(sql.JsonBlob()) @classmethod def from_dict(cls, token_dict): # shove any non-indexed properties into extra extra = copy.deepcopy(token_dict) data = {} for k in ('id', 'expires'): data[k] = extra.pop(k, None) data['extra'] = extra return cls(**data) def to_dict(self): out = copy.deepcopy(self.extra) out['id'] = self.id out['expires'] = self.expires return out class Token(sql.Base, token.Driver): # Public interface def get_token(self, token_id): session = self.get_session() token_ref = session.query(TokenModel).filter_by(id=token_id).first() now = datetime.datetime.utcnow() if token_ref and (not token_ref.expires or now < token_ref.expires): return token_ref.to_dict() else: raise exception.TokenNotFound(token_id=token_id) def create_token(self, token_id, data): data_copy = copy.deepcopy(data) if 'expires' not in data_copy: data_copy['expires'] = self._get_default_expire_time() token_ref = TokenModel.from_dict(data_copy) token_ref.id = token_id session = self.get_session() with session.begin(): session.add(token_ref) session.flush() return token_ref.to_dict() def delete_token(self, token_id): session = self.get_session() token_ref = session.query(TokenModel)\ .filter_by(id=token_id)\ .first() if not token_ref: raise exception.TokenNotFound(token_id=token_id) with session.begin(): session.delete(token_ref) session.flush() def list_tokens(self, user_id): session = self.get_session() tokens = [] now = datetime.datetime.utcnow() for token_ref in session.query(TokenModel)\ .filter(TokenModel.expires > now): token_ref_dict = token_ref.to_dict() if 'user' not in token_ref_dict: continue if token_ref_dict['user'].get('id') != user_id: continue tokens.append(token_ref['id']) return tokens
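# Standalone sketch (illustrative only; no database session involved) of the
# validity rule get_token() above applies: a token is live when it has no
# expiry at all, or when its expiry is still in the future. Note that
# list_tokens() instead filters on "expires > now", so it skips rows whose
# expiry is NULL.
def _sketch_is_live(token_dict, now):
    expires = token_dict.get('expires')
    return not expires or now < expires


if __name__ == '__main__':
    now = datetime.datetime.utcnow()
    hour = datetime.timedelta(hours=1)
    tokens = [{'id': 'a', 'expires': now + hour},   # still valid
              {'id': 'b', 'expires': now - hour},   # already expired
              {'id': 'c', 'expires': None}]         # never expires
    assert [t['id'] for t in tokens if _sketch_is_live(t, now)] == ['a', 'c']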
./CrossVul/dataset_final_sorted/CWE-264/py/good_3693_2
crossvul-python_data_good_3771_3
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010-2011 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Common utilities used in testing"""

import errno
import functools
import os
import random
import socket
import StringIO
import subprocess
import unittest

import nose.plugins.skip

from glance.common import config
from glance.common import utils
from glance.common import wsgi
from glance import context
from glance.openstack.common import cfg

CONF = cfg.CONF


def get_isolated_test_env():
    """
    Returns a tuple of (test_id, test_dir) that is unique
    for an isolated test environment. Also ensures the test_dir
    is created.
    """
    test_id = random.randint(0, 100000)
    test_tmp_dir = os.getenv('GLANCE_TEST_TMP_DIR', '/tmp')
    test_dir = os.path.join(test_tmp_dir, "test.%d" % test_id)
    utils.safe_mkdirs(test_dir)
    return test_id, test_dir


class BaseTestCase(unittest.TestCase):

    def setUp(self):
        super(BaseTestCase, self).setUp()
        # NOTE(bcwaldon): parse_args has to be called to register certain
        # command-line options - specifically we need config_dir for
        # the following policy tests
        config.parse_args(args=[])

    def tearDown(self):
        super(BaseTestCase, self).tearDown()
        CONF.reset()

    def config(self, **kw):
        """
        Override some configuration values.

        The keyword arguments are the names of configuration options to
        override and their values.

        If a group argument is supplied, the overrides are applied to
        the specified configuration option group.

        All overrides are automatically cleared at the end of the current
        test by the tearDown() method.
        """
        group = kw.pop('group', None)
        for k, v in kw.iteritems():
            CONF.set_override(k, v, group)


class skip_test(object):
    """Decorator that skips a test."""
    def __init__(self, msg):
        self.message = msg

    def __call__(self, func):
        def _skipper(*args, **kw):
            """Wrapped skipper function."""
            raise nose.SkipTest(self.message)
        _skipper.__name__ = func.__name__
        _skipper.__doc__ = func.__doc__
        return _skipper


class skip_if(object):
    """Decorator that skips a test if condition is true."""
    def __init__(self, condition, msg):
        self.condition = condition
        self.message = msg

    def __call__(self, func):
        def _skipper(*args, **kw):
            """Wrapped skipper function."""
            if self.condition:
                raise nose.SkipTest(self.message)
            func(*args, **kw)
        _skipper.__name__ = func.__name__
        _skipper.__doc__ = func.__doc__
        return _skipper


class skip_unless(object):
    """Decorator that skips a test if condition is not true."""
    def __init__(self, condition, msg):
        self.condition = condition
        self.message = msg

    def __call__(self, func):
        def _skipper(*args, **kw):
            """Wrapped skipper function."""
            if not self.condition:
                raise nose.SkipTest(self.message)
            func(*args, **kw)
        _skipper.__name__ = func.__name__
        _skipper.__doc__ = func.__doc__
        return _skipper


class requires(object):
    """Decorator that initiates additional test setup/teardown."""
    def __init__(self, setup=None, teardown=None):
        self.setup = setup
        self.teardown = teardown

    def __call__(self, func):
        def _runner(*args, **kw):
            if self.setup:
                self.setup(args[0])
            func(*args, **kw)
            if self.teardown:
                self.teardown(args[0])
        _runner.__name__ = func.__name__
        _runner.__doc__ = func.__doc__
        return _runner


class depends_on_exe(object):
    """Decorator to skip test if an executable is unavailable"""
    def __init__(self, exe):
        self.exe = exe

    def __call__(self, func):
        def _runner(*args, **kw):
            cmd = 'which %s' % self.exe
            exitcode, out, err = execute(cmd, raise_error=False)
            if exitcode != 0:
                args[0].disabled_message = 'test requires exe: %s' % self.exe
                args[0].disabled = True
            func(*args, **kw)
        _runner.__name__ = func.__name__
        _runner.__doc__ = func.__doc__
        return _runner


def skip_if_disabled(func):
    """Decorator that skips a test if test case is disabled."""
    @functools.wraps(func)
    def wrapped(*a, **kwargs):
        func.__test__ = False
        test_obj = a[0]
        message = getattr(test_obj, 'disabled_message', 'Test disabled')
        if getattr(test_obj, 'disabled', False):
            raise nose.SkipTest(message)
        func(*a, **kwargs)
    return wrapped


def execute(cmd, raise_error=True, no_venv=False, exec_env=None,
            expect_exit=True, expected_exitcode=0, context=None):
    """
    Executes a command in a subprocess. Returns a tuple
    of (exitcode, out, err), where out is the string output
    from stdout and err is the string output from stderr when
    executing the command.

    :param cmd: Command string to execute
    :param raise_error: If the return code is not 0 (success), raise a
                        RuntimeError. Default: True
    :param no_venv: Disable the virtual environment
    :param exec_env: Optional dictionary of additional environment
                     variables; values may be callables, which will
                     be passed the current value of the named
                     environment variable
    :param expect_exit: Optional flag, True if a timely exit is expected
    :param expected_exitcode: expected exitcode from the launcher
    :param context: additional context for error message
    """

    env = os.environ.copy()
    if exec_env is not None:
        for env_name, env_val in exec_env.items():
            if callable(env_val):
                env[env_name] = env_val(env.get(env_name))
            else:
                env[env_name] = env_val

    # If we're asked to omit the virtualenv, and if one is set up,
    # restore the various environment variables
    if no_venv and 'VIRTUAL_ENV' in env:
        # Clip off the first element of PATH
        env['PATH'] = env['PATH'].split(os.pathsep, 1)[-1]
        del env['VIRTUAL_ENV']

    # Make sure that we use the programs in the
    # current source directory's bin/ directory.
    path_ext = [os.path.join(os.getcwd(), 'bin')]

    # Also jack in the path cmd comes from, if it's absolute
    executable = cmd.split()[0]
    if os.path.isabs(executable):
        path_ext.append(os.path.dirname(executable))

    env['PATH'] = ':'.join(path_ext) + ':' + env['PATH']
    process = subprocess.Popen(cmd,
                               shell=True,
                               stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               env=env)
    if expect_exit:
        result = process.communicate()
        (out, err) = result
        exitcode = process.returncode
    else:
        out = ''
        err = ''
        exitcode = 0

    if exitcode != expected_exitcode and raise_error:
        msg = "Command %(cmd)s did not succeed. Returned an exit "\
              "code of %(exitcode)d."\
              "\n\nSTDOUT: %(out)s"\
              "\n\nSTDERR: %(err)s" % locals()
        if context:
            msg += "\n\nCONTEXT: %s" % context
        raise RuntimeError(msg)
    return exitcode, out, err


def find_executable(cmdname):
    """
    Searches the path for a given cmdname. Returns an absolute
    filename if an executable with the given name exists in the path,
    or None if one does not.

    :param cmdname: The bare name of the executable to search for
    """

    # Keep an eye out for the possibility of an absolute pathname
    if os.path.isabs(cmdname):
        return cmdname

    # Get a list of the directories to search
    path = ([os.path.join(os.getcwd(), 'bin')] +
            os.environ['PATH'].split(os.pathsep))

    # Search through each in turn
    for elem in path:
        full_path = os.path.join(elem, cmdname)
        if os.access(full_path, os.X_OK):
            return full_path

    # No dice...
    return None


def get_unused_port():
    """
    Returns an unused port on localhost.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind(('localhost', 0))
    addr, port = s.getsockname()
    s.close()
    return port


def xattr_writes_supported(path):
    """
    Returns True if we can write a file to the supplied
    path and subsequently write a xattr to that file.
    """
    try:
        import xattr
    except ImportError:
        return False

    def set_xattr(path, key, value):
        xattr.setxattr(path, "user.%s" % key, str(value))

    # We do a quick attempt to write a user xattr to a temporary file
    # to check that the filesystem is even enabled to support xattrs
    fake_filepath = os.path.join(path, 'testing-checkme')
    result = True
    with open(fake_filepath, 'wb') as fake_file:
        fake_file.write("XXX")
        fake_file.flush()
    try:
        set_xattr(fake_filepath, 'hits', '1')
    except IOError, e:
        if e.errno == errno.EOPNOTSUPP:
            result = False
    else:
        # Cleanup after ourselves...
        if os.path.exists(fake_filepath):
            os.unlink(fake_filepath)

    return result


def minimal_headers(name, public=True):
    headers = {
        'Content-Type': 'application/octet-stream',
        'X-Image-Meta-Name': name,
        'X-Image-Meta-disk_format': 'raw',
        'X-Image-Meta-container_format': 'ovf',
    }
    if public:
        headers['X-Image-Meta-Is-Public'] = 'True'
    return headers


def minimal_add_command(port, name, suffix='', public=True):
    visibility = 'is_public=True' if public else ''
    return ("bin/glance --port=%d add %s"
            " disk_format=raw container_format=ovf"
            " name=%s %s" % (port, visibility, name, suffix))


class FakeAuthMiddleware(wsgi.Middleware):

    def __init__(self, app, is_admin=False):
        super(FakeAuthMiddleware, self).__init__(app)
        self.is_admin = is_admin

    def process_request(self, req):
        auth_tok = req.headers.get('X-Auth-Token')
        user = None
        tenant = None
        roles = []
        if auth_tok:
            user, tenant, role = auth_tok.split(':')
            if tenant.lower() == 'none':
                tenant = None
            roles = [role]
            req.headers['X-User-Id'] = user
            req.headers['X-Tenant-Id'] = tenant
            req.headers['X-Roles'] = role
            req.headers['X-Identity-Status'] = 'Confirmed'
        kwargs = {
            'user': user,
            'tenant': tenant,
            'roles': roles,
            'is_admin': self.is_admin,
            'auth_tok': auth_tok,
        }
        req.context = context.RequestContext(**kwargs)


class FakeHTTPResponse(object):
    def __init__(self, status=200, headers=None, data=None, *args, **kwargs):
        data = data or 'I am a teapot, short and stout\n'
        self.data = StringIO.StringIO(data)
        self.read = self.data.read
        self.status = status
        self.headers = headers or {'content-length': len(data)}

    def getheader(self, name, default=None):
        return self.headers.get(name.lower(), default)

    def getheaders(self):
        return self.headers or {}

    def read(self, amt):
        return self.data.read(amt)
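
# A minimal sketch (not part of the original file) of how the helpers above
# compose in a functional test. The class name, image name, and the qemu-img
# dependency are invented for illustration; the decorators, execute() and
# minimal_add_command() are the ones defined above.
class _ExampleGlanceTest(BaseTestCase):

    @depends_on_exe('qemu-img')
    @skip_if_disabled
    def test_minimal_add(self):
        port = get_unused_port()
        cmd = minimal_add_command(port, 'demo-image')
        exitcode, out, err = execute(cmd, raise_error=False)
        self.assertEqual(0, exitcode)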
./CrossVul/dataset_final_sorted/CWE-264/py/good_3771_3
crossvul-python_data_bad_3633_4
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Implementation of SQLAlchemy backend.
"""

import re
import warnings

from nova import block_device
from nova import db
from nova import exception
from nova import flags
from nova import ipv6
from nova import utils
from nova import log as logging
from nova.compute import vm_states
from nova.db.sqlalchemy import models
from nova.db.sqlalchemy.session import get_session
from sqlalchemy import or_
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import joinedload_all
from sqlalchemy.sql import func
from sqlalchemy.sql.expression import desc
from sqlalchemy.sql.expression import literal_column

FLAGS = flags.FLAGS
LOG = logging.getLogger("nova.db.sqlalchemy")


def is_admin_context(context):
    """Indicates if the request context is an administrator."""
    if not context:
        warnings.warn(_('Use of empty request context is deprecated'),
                      DeprecationWarning)
        raise Exception('die')
    return context.is_admin


def is_user_context(context):
    """Indicates if the request context is a normal user."""
    if not context:
        return False
    if context.is_admin:
        return False
    if not context.user_id or not context.project_id:
        return False
    return True


def authorize_project_context(context, project_id):
    """Ensures a request has permission to access the given project."""
    if is_user_context(context):
        if not context.project_id:
            raise exception.NotAuthorized()
        elif context.project_id != project_id:
            raise exception.NotAuthorized()


def authorize_user_context(context, user_id):
    """Ensures a request has permission to access the given user."""
    if is_user_context(context):
        if not context.user_id:
            raise exception.NotAuthorized()
        elif context.user_id != user_id:
            raise exception.NotAuthorized()


def can_read_deleted(context):
    """Indicates if the context has access to deleted objects."""
    if not context:
        return False
    return context.read_deleted


def require_admin_context(f):
    """Decorator to require admin request context.

    The first argument to the wrapped function must be the context.

    """
    def wrapper(*args, **kwargs):
        if not is_admin_context(args[0]):
            raise exception.AdminRequired()
        return f(*args, **kwargs)
    return wrapper


def require_context(f):
    """Decorator to require *any* user or admin context.

    This does no authorization for user or project access matching, see
    :py:func:`authorize_project_context` and
    :py:func:`authorize_user_context`.

    The first argument to the wrapped function must be the context.

    """
    def wrapper(*args, **kwargs):
        if not is_admin_context(args[0]) and not is_user_context(args[0]):
            raise exception.NotAuthorized()
        return f(*args, **kwargs)
    return wrapper


def require_instance_exists(f):
    """Decorator to require the specified instance to exist.

    Requires the wrapped function to use context and instance_id as
    their first two arguments.

    """
    def wrapper(context, instance_id, *args, **kwargs):
        db.api.instance_get(context, instance_id)
        return f(context, instance_id, *args, **kwargs)
    wrapper.__name__ = f.__name__
    return wrapper


def require_volume_exists(f):
    """Decorator to require the specified volume to exist.

    Requires the wrapped function to use context and volume_id as
    their first two arguments.

    """
    def wrapper(context, volume_id, *args, **kwargs):
        db.api.volume_get(context, volume_id)
        return f(context, volume_id, *args, **kwargs)
    wrapper.__name__ = f.__name__
    return wrapper


###################


@require_admin_context
def service_destroy(context, service_id):
    session = get_session()
    with session.begin():
        service_ref = service_get(context, service_id, session=session)
        service_ref.delete(session=session)

        if service_ref.topic == 'compute' and \
                len(service_ref.compute_node) != 0:
            for c in service_ref.compute_node:
                c.delete(session=session)


@require_admin_context
def service_get(context, service_id, session=None):
    if not session:
        session = get_session()
    result = session.query(models.Service).\
                     options(joinedload('compute_node')).\
                     filter_by(id=service_id).\
                     filter_by(deleted=can_read_deleted(context)).\
                     first()
    if not result:
        raise exception.ServiceNotFound(service_id=service_id)
    return result


@require_admin_context
def service_get_all(context, disabled=None):
    session = get_session()
    query = session.query(models.Service).\
                    filter_by(deleted=can_read_deleted(context))
    if disabled is not None:
        query = query.filter_by(disabled=disabled)
    return query.all()


@require_admin_context
def service_get_all_by_topic(context, topic):
    session = get_session()
    return session.query(models.Service).\
                   filter_by(deleted=False).\
                   filter_by(disabled=False).\
                   filter_by(topic=topic).\
                   all()


@require_admin_context
def service_get_by_host_and_topic(context, host, topic):
    session = get_session()
    return session.query(models.Service).\
                   filter_by(deleted=False).\
                   filter_by(disabled=False).\
                   filter_by(host=host).\
                   filter_by(topic=topic).\
                   first()


@require_admin_context
def service_get_all_by_host(context, host):
    session = get_session()
    return session.query(models.Service).\
                   filter_by(deleted=False).\
                   filter_by(host=host).\
                   all()


@require_admin_context
def service_get_all_compute_by_host(context, host):
    topic = 'compute'
    session = get_session()
    result = session.query(models.Service).\
                     options(joinedload('compute_node')).\
                     filter_by(deleted=False).\
                     filter_by(host=host).\
                     filter_by(topic=topic).\
                     all()

    if not result:
        raise exception.ComputeHostNotFound(host=host)

    return result


@require_admin_context
def _service_get_all_topic_subquery(context, session, topic, subq, label):
    sort_value = getattr(subq.c, label)
    return session.query(models.Service, func.coalesce(sort_value, 0)).\
                   filter_by(topic=topic).\
                   filter_by(deleted=False).\
                   filter_by(disabled=False).\
                   outerjoin((subq, models.Service.host == subq.c.host)).\
                   order_by(sort_value).\
                   all()


@require_admin_context
def service_get_all_compute_sorted(context):
    session = get_session()
    with session.begin():
        # NOTE(vish): The intended query is below
        #             SELECT services.*, COALESCE(inst_cores.instance_cores,
        #                                         0)
        #             FROM services LEFT OUTER JOIN
        #             (SELECT host, SUM(instances.vcpus) AS instance_cores
        #              FROM instances GROUP BY host) AS inst_cores
        #             ON services.host = inst_cores.host
        topic = 'compute'
        label = 'instance_cores'
        subq = session.query(models.Instance.host,
                             func.sum(models.Instance.vcpus).label(label)).\
                       filter_by(deleted=False).\
                       group_by(models.Instance.host).\
                       subquery()
        return _service_get_all_topic_subquery(context,
                                               session,
                                               topic,
                                               subq,
                                               label)


@require_admin_context
def service_get_all_network_sorted(context):
    session = get_session()
    with session.begin():
        topic = 'network'
        label = 'network_count'
        subq = session.query(models.Network.host,
                             func.count(models.Network.id).label(label)).\
                       filter_by(deleted=False).\
                       group_by(models.Network.host).\
                       subquery()
        return _service_get_all_topic_subquery(context,
                                               session,
                                               topic,
                                               subq,
                                               label)


@require_admin_context
def service_get_all_volume_sorted(context):
    session = get_session()
    with session.begin():
        topic = 'volume'
        label = 'volume_gigabytes'
        subq = session.query(models.Volume.host,
                             func.sum(models.Volume.size).label(label)).\
                       filter_by(deleted=False).\
                       group_by(models.Volume.host).\
                       subquery()
        return _service_get_all_topic_subquery(context,
                                               session,
                                               topic,
                                               subq,
                                               label)


@require_admin_context
def service_get_by_args(context, host, binary):
    session = get_session()
    result = session.query(models.Service).\
                     filter_by(host=host).\
                     filter_by(binary=binary).\
                     filter_by(deleted=can_read_deleted(context)).\
                     first()
    if not result:
        raise exception.HostBinaryNotFound(host=host, binary=binary)
    return result


@require_admin_context
def service_create(context, values):
    service_ref = models.Service()
    service_ref.update(values)
    if not FLAGS.enable_new_services:
        service_ref.disabled = True
    service_ref.save()
    return service_ref


@require_admin_context
def service_update(context, service_id, values):
    session = get_session()
    with session.begin():
        service_ref = service_get(context, service_id, session=session)
        service_ref.update(values)
        service_ref.save(session=session)


###################


@require_admin_context
def compute_node_get(context, compute_id, session=None):
    if not session:
        session = get_session()

    result = session.query(models.ComputeNode).\
                     filter_by(id=compute_id).\
                     filter_by(deleted=can_read_deleted(context)).\
                     first()

    if not result:
        raise exception.ComputeHostNotFound(host=compute_id)

    return result


@require_admin_context
def compute_node_create(context, values):
    compute_node_ref = models.ComputeNode()
    compute_node_ref.update(values)
    compute_node_ref.save()
    return compute_node_ref


@require_admin_context
def compute_node_update(context, compute_id, values):
    session = get_session()
    with session.begin():
        compute_ref = compute_node_get(context, compute_id, session=session)
        compute_ref.update(values)
        compute_ref.save(session=session)


###################


@require_admin_context
def certificate_get(context, certificate_id, session=None):
    if not session:
        session = get_session()

    result = session.query(models.Certificate).\
                     filter_by(id=certificate_id).\
                     filter_by(deleted=can_read_deleted(context)).\
                     first()

    if not result:
        raise exception.CertificateNotFound(certificate_id=certificate_id)

    return result


@require_admin_context
def certificate_create(context, values):
    certificate_ref = models.Certificate()
    for (key, value) in values.iteritems():
        certificate_ref[key] = value
    certificate_ref.save()
    return certificate_ref


@require_admin_context
def certificate_destroy(context, certificate_id):
    session = get_session()
    with session.begin():
        certificate_ref = certificate_get(context,
                                          certificate_id,
                                          session=session)
        certificate_ref.delete(session=session)


@require_admin_context
def certificate_get_all_by_project(context, project_id):
    session = get_session()
    return session.query(models.Certificate).\
                   filter_by(project_id=project_id).\
                   filter_by(deleted=False).\
                   all()


@require_admin_context
def certificate_get_all_by_user(context, user_id):
    session = get_session()
    return session.query(models.Certificate).\
                   filter_by(user_id=user_id).\
                   filter_by(deleted=False).\
                   all()


@require_admin_context
def certificate_get_all_by_user_and_project(_context, user_id, project_id):
    session = get_session()
    return session.query(models.Certificate).\
                   filter_by(user_id=user_id).\
                   filter_by(project_id=project_id).\
                   filter_by(deleted=False).\
                   all()


@require_admin_context
def certificate_update(context, certificate_id, values):
    session = get_session()
    with session.begin():
        certificate_ref = certificate_get(context,
                                          certificate_id,
                                          session=session)
        for (key, value) in values.iteritems():
            certificate_ref[key] = value
        certificate_ref.save(session=session)


###################


@require_context
def floating_ip_get(context, id):
    session = get_session()
    result = None
    if is_admin_context(context):
        result = session.query(models.FloatingIp).\
                         options(joinedload('fixed_ip')).\
                         options(joinedload_all('fixed_ip.instance')).\
                         filter_by(id=id).\
                         filter_by(deleted=can_read_deleted(context)).\
                         first()
    elif is_user_context(context):
        result = session.query(models.FloatingIp).\
                         options(joinedload('fixed_ip')).\
                         options(joinedload_all('fixed_ip.instance')).\
                         filter_by(project_id=context.project_id).\
                         filter_by(id=id).\
                         filter_by(deleted=False).\
                         first()
    if not result:
        raise exception.FloatingIpNotFound(id=id)

    return result


@require_context
def floating_ip_allocate_address(context, project_id):
    authorize_project_context(context, project_id)
    session = get_session()
    with session.begin():
        floating_ip_ref = session.query(models.FloatingIp).\
                                  filter_by(fixed_ip_id=None).\
                                  filter_by(project_id=None).\
                                  filter_by(deleted=False).\
                                  with_lockmode('update').\
                                  first()
        # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
        #             then this has concurrency issues
        if not floating_ip_ref:
            raise exception.NoMoreFloatingIps()
        floating_ip_ref['project_id'] = project_id
        session.add(floating_ip_ref)
    return floating_ip_ref['address']


@require_context
def floating_ip_create(context, values):
    floating_ip_ref = models.FloatingIp()
    floating_ip_ref.update(values)
    floating_ip_ref.save()
    return floating_ip_ref['address']


@require_context
def floating_ip_count_by_project(context, project_id):
    authorize_project_context(context, project_id)
    session = get_session()
    # TODO(tr3buchet): why leave auto_assigned floating IPs out?
    return session.query(models.FloatingIp).\
                   filter_by(project_id=project_id).\
                   filter_by(auto_assigned=False).\
                   filter_by(deleted=False).\
                   count()


@require_context
def floating_ip_fixed_ip_associate(context, floating_address,
                                   fixed_address, host):
    session = get_session()
    with session.begin():
        floating_ip_ref = floating_ip_get_by_address(context,
                                                     floating_address,
                                                     session=session)
        fixed_ip_ref = fixed_ip_get_by_address(context,
                                               fixed_address,
                                               session=session)
        floating_ip_ref.fixed_ip = fixed_ip_ref
        floating_ip_ref.host = host
        floating_ip_ref.save(session=session)


@require_context
def floating_ip_deallocate(context, address):
    session = get_session()
    with session.begin():
        floating_ip_ref = floating_ip_get_by_address(context,
                                                     address,
                                                     session=session)
        floating_ip_ref['project_id'] = None
        floating_ip_ref['host'] = None
        floating_ip_ref['auto_assigned'] = False
        floating_ip_ref.save(session=session)


@require_context
def floating_ip_destroy(context, address):
    session = get_session()
    with session.begin():
        floating_ip_ref = floating_ip_get_by_address(context,
                                                     address,
                                                     session=session)
        floating_ip_ref.delete(session=session)


@require_context
def floating_ip_disassociate(context, address):
    session = get_session()
    with session.begin():
        floating_ip_ref = floating_ip_get_by_address(context,
                                                     address,
                                                     session=session)
        fixed_ip_ref = floating_ip_ref.fixed_ip
        if fixed_ip_ref:
            fixed_ip_address = fixed_ip_ref['address']
        else:
            fixed_ip_address = None
        floating_ip_ref.fixed_ip = None
        floating_ip_ref.host = None
        floating_ip_ref.save(session=session)
    return fixed_ip_address


@require_context
def floating_ip_set_auto_assigned(context, address):
    session = get_session()
    with session.begin():
        floating_ip_ref = floating_ip_get_by_address(context,
                                                     address,
                                                     session=session)
        floating_ip_ref.auto_assigned = True
        floating_ip_ref.save(session=session)


@require_admin_context
def floating_ip_get_all(context):
    session = get_session()
    floating_ip_refs = session.query(models.FloatingIp).\
                               options(joinedload_all('fixed_ip.instance')).\
                               filter_by(deleted=False).\
                               all()
    if not floating_ip_refs:
        raise exception.NoFloatingIpsDefined()
    return floating_ip_refs


@require_admin_context
def floating_ip_get_all_by_host(context, host):
    session = get_session()
    floating_ip_refs = session.query(models.FloatingIp).\
                               options(joinedload_all('fixed_ip.instance')).\
                               filter_by(host=host).\
                               filter_by(deleted=False).\
                               all()
    if not floating_ip_refs:
        raise exception.FloatingIpNotFoundForHost(host=host)
    return floating_ip_refs


@require_context
def floating_ip_get_all_by_project(context, project_id):
    authorize_project_context(context, project_id)
    session = get_session()
    # TODO(tr3buchet): why do we not want auto_assigned floating IPs here?
    return session.query(models.FloatingIp).\
                   options(joinedload_all('fixed_ip.instance')).\
                   filter_by(project_id=project_id).\
                   filter_by(auto_assigned=False).\
                   filter_by(deleted=False).\
                   all()


@require_context
def floating_ip_get_by_address(context, address, session=None):
    if not session:
        session = get_session()

    result = session.query(models.FloatingIp).\
                     options(joinedload_all('fixed_ip.network')).\
                     filter_by(address=address).\
                     filter_by(deleted=can_read_deleted(context)).\
                     first()

    if not result:
        raise exception.FloatingIpNotFoundForAddress(address=address)

    # If the floating IP has a project ID set, check to make sure
    # the non-admin user has access.
    if result.project_id and is_user_context(context):
        authorize_project_context(context, result.project_id)

    return result


@require_context
def floating_ip_update(context, address, values):
    session = get_session()
    with session.begin():
        floating_ip_ref = floating_ip_get_by_address(context, address,
                                                     session)
        for (key, value) in values.iteritems():
            floating_ip_ref[key] = value
        floating_ip_ref.save(session=session)


###################


@require_admin_context
def fixed_ip_associate(context, address, instance_id, network_id=None,
                       reserved=False):
    """Keyword arguments:
    reserved -- should be a boolean value (True or False), exact value will
    be used to filter on the fixed ip address
    """
    session = get_session()
    with session.begin():
        network_or_none = or_(models.FixedIp.network_id == network_id,
                              models.FixedIp.network_id == None)
        fixed_ip_ref = session.query(models.FixedIp).\
                               filter(network_or_none).\
                               filter_by(reserved=reserved).\
                               filter_by(deleted=False).\
                               filter_by(address=address).\
                               with_lockmode('update').\
                               first()
        # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
        #             then this has concurrency issues
        if fixed_ip_ref is None:
            raise exception.FixedIpNotFoundForNetwork(address=address,
                                                      network_id=network_id)
        if fixed_ip_ref.instance is not None:
            raise exception.FixedIpAlreadyInUse(address=address)

        if not fixed_ip_ref.network:
            fixed_ip_ref.network = network_get(context,
                                               network_id,
                                               session=session)
        fixed_ip_ref.instance = instance_get(context,
                                             instance_id,
                                             session=session)
        session.add(fixed_ip_ref)
    return fixed_ip_ref['address']


@require_admin_context
def fixed_ip_associate_pool(context, network_id, instance_id=None, host=None):
    session = get_session()
    with session.begin():
        network_or_none = or_(models.FixedIp.network_id == network_id,
                              models.FixedIp.network_id == None)
        fixed_ip_ref = session.query(models.FixedIp).\
                               filter(network_or_none).\
                               filter_by(reserved=False).\
                               filter_by(deleted=False).\
                               filter_by(instance=None).\
                               filter_by(host=None).\
                               with_lockmode('update').\
                               first()
        # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
        #             then this has concurrency issues
        if not fixed_ip_ref:
            raise exception.NoMoreFixedIps()

        if not fixed_ip_ref.network:
            fixed_ip_ref.network = network_get(context,
                                               network_id,
                                               session=session)
        if instance_id:
            fixed_ip_ref.instance = instance_get(context,
                                                 instance_id,
                                                 session=session)
        if host:
            fixed_ip_ref.host = host
        session.add(fixed_ip_ref)
    return fixed_ip_ref['address']


@require_context
def fixed_ip_create(_context, values):
    fixed_ip_ref = models.FixedIp()
    fixed_ip_ref.update(values)
    fixed_ip_ref.save()
    return fixed_ip_ref['address']


@require_context
def fixed_ip_bulk_create(_context, ips):
    session = get_session()
    with session.begin():
        for ip in ips:
            model = models.FixedIp()
            model.update(ip)
            session.add(model)


@require_context
def fixed_ip_disassociate(context, address):
    session = get_session()
    with session.begin():
        fixed_ip_ref = fixed_ip_get_by_address(context,
                                               address,
                                               session=session)
        fixed_ip_ref.instance = None
        fixed_ip_ref.save(session=session)


@require_admin_context
def fixed_ip_disassociate_all_by_timeout(_context, host, time):
    session = get_session()
    inner_q = session.query(models.Network.id).\
                      filter_by(host=host).\
                      subquery()
    result = session.query(models.FixedIp).\
                     filter(models.FixedIp.network_id.in_(inner_q)).\
                     filter(models.FixedIp.updated_at < time).\
                     filter(models.FixedIp.instance_id != None).\
                     filter_by(allocated=False).\
                     update({'instance_id': None,
                             'leased': False,
                             'updated_at': utils.utcnow()},
                            synchronize_session='fetch')
    return result
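
# A minimal sketch (not part of the original module) of how the decorators
# above gate these DB calls. The nova.context import path and the
# RequestContext constructor arguments are assumptions for illustration.
def _demo_context_checks():
    from nova import context as nova_context
    user_ctxt = nova_context.RequestContext(user_id='fake-user',
                                            project_id='fake-project')
    # @require_context plus an explicit authorize_project_context() call
    # lets a plain user context count floating IPs in its own project:
    floating_ip_count_by_project(user_ctxt, 'fake-project')
    # but @require_admin_context rejects the same context outright:
    try:
        fixed_ip_disassociate_all_by_timeout(user_ctxt, 'host1',
                                             utils.utcnow())
    except exception.AdminRequired:
        pass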
@require_admin_context
def fixed_ip_get_all(context, session=None):
    if not session:
        session = get_session()
    result = session.query(models.FixedIp).\
                     options(joinedload('floating_ips')).\
                     all()
    if not result:
        raise exception.NoFixedIpsDefined()

    return result


@require_admin_context
def fixed_ip_get_all_by_instance_host(context, host=None):
    session = get_session()

    result = session.query(models.FixedIp).\
                     options(joinedload('floating_ips')).\
                     join(models.FixedIp.instance).\
                     filter_by(state=1).\
                     filter_by(host=host).\
                     all()

    if not result:
        raise exception.FixedIpNotFoundForHost(host=host)

    return result


@require_context
def fixed_ip_get_by_address(context, address, session=None):
    if not session:
        session = get_session()
    result = session.query(models.FixedIp).\
                     filter_by(address=address).\
                     filter_by(deleted=can_read_deleted(context)).\
                     options(joinedload('floating_ips')).\
                     options(joinedload('network')).\
                     options(joinedload('instance')).\
                     first()
    if not result:
        raise exception.FixedIpNotFoundForAddress(address=address)

    if is_user_context(context):
        authorize_project_context(context, result.instance.project_id)

    return result


@require_context
def fixed_ip_get_by_instance(context, instance_id):
    session = get_session()
    rv = session.query(models.FixedIp).\
                 options(joinedload('floating_ips')).\
                 filter_by(instance_id=instance_id).\
                 filter_by(deleted=False).\
                 all()
    if not rv:
        raise exception.FixedIpNotFoundForInstance(instance_id=instance_id)
    return rv


@require_context
def fixed_ip_get_by_network_host(context, network_id, host):
    session = get_session()
    rv = session.query(models.FixedIp).\
                 filter_by(network_id=network_id).\
                 filter_by(host=host).\
                 filter_by(deleted=False).\
                 first()
    if not rv:
        raise exception.FixedIpNotFoundForNetworkHost(network_id=network_id,
                                                      host=host)
    return rv


@require_context
def fixed_ip_get_by_virtual_interface(context, vif_id):
    session = get_session()
    rv = session.query(models.FixedIp).\
                 options(joinedload('floating_ips')).\
                 filter_by(virtual_interface_id=vif_id).\
                 filter_by(deleted=False).\
                 all()
    if not rv:
        raise exception.FixedIpNotFoundForVirtualInterface(vif_id=vif_id)
    return rv


@require_admin_context
def fixed_ip_get_network(context, address):
    fixed_ip_ref = fixed_ip_get_by_address(context, address)
    return fixed_ip_ref.network


@require_context
def fixed_ip_update(context, address, values):
    session = get_session()
    with session.begin():
        fixed_ip_ref = fixed_ip_get_by_address(context,
                                               address,
                                               session=session)
        fixed_ip_ref.update(values)
        fixed_ip_ref.save(session=session)


###################


@require_context
def virtual_interface_create(context, values):
    """Create a new virtual interface record in the database.

    :param values: = dict containing column values
    """
    try:
        vif_ref = models.VirtualInterface()
        vif_ref.update(values)
        vif_ref.save()
    except IntegrityError:
        raise exception.VirtualInterfaceCreateException()

    return vif_ref


@require_context
def virtual_interface_update(context, vif_id, values):
    """Update a virtual interface record in the database.

    :param vif_id: = id of virtual interface to update
    :param values: = values to update
    """
    session = get_session()
    with session.begin():
        vif_ref = virtual_interface_get(context, vif_id, session=session)
        vif_ref.update(values)
        vif_ref.save(session=session)
        return vif_ref


@require_context
def virtual_interface_get(context, vif_id, session=None):
    """Gets a virtual interface from the table.

    :param vif_id: = id of the virtual interface
    """
    if not session:
        session = get_session()

    vif_ref = session.query(models.VirtualInterface).\
                      filter_by(id=vif_id).\
                      options(joinedload('network')).\
                      options(joinedload('instance')).\
                      options(joinedload('fixed_ips')).\
                      first()
    return vif_ref


@require_context
def virtual_interface_get_by_address(context, address):
    """Gets a virtual interface from the table.

    :param address: = the address of the interface you're looking to get
    """
    session = get_session()
    vif_ref = session.query(models.VirtualInterface).\
                      filter_by(address=address).\
                      options(joinedload('network')).\
                      options(joinedload('instance')).\
                      options(joinedload('fixed_ips')).\
                      first()
    return vif_ref


@require_context
def virtual_interface_get_by_uuid(context, vif_uuid):
    """Gets a virtual interface from the table.

    :param vif_uuid: the uuid of the interface you're looking to get
    """
    session = get_session()
    vif_ref = session.query(models.VirtualInterface).\
                      filter_by(uuid=vif_uuid).\
                      options(joinedload('network')).\
                      options(joinedload('instance')).\
                      options(joinedload('fixed_ips')).\
                      first()
    return vif_ref


@require_context
def virtual_interface_get_by_fixed_ip(context, fixed_ip_id):
    """Gets the virtual interface fixed_ip is associated with.

    :param fixed_ip_id: = id of the fixed_ip
    """
    session = get_session()
    vif_ref = session.query(models.VirtualInterface).\
                      filter_by(fixed_ip_id=fixed_ip_id).\
                      options(joinedload('network')).\
                      options(joinedload('instance')).\
                      options(joinedload('fixed_ips')).\
                      first()
    return vif_ref


@require_context
@require_instance_exists
def virtual_interface_get_by_instance(context, instance_id):
    """Gets all virtual interfaces for instance.

    :param instance_id: = id of the instance to retrieve vifs for
    """
    session = get_session()
    vif_refs = session.query(models.VirtualInterface).\
                       filter_by(instance_id=instance_id).\
                       options(joinedload('network')).\
                       options(joinedload('instance')).\
                       options(joinedload('fixed_ips')).\
                       all()
    return vif_refs


@require_context
def virtual_interface_get_by_instance_and_network(context, instance_id,
                                                  network_id):
    """Gets virtual interface for instance that's associated with network."""
    session = get_session()
    vif_ref = session.query(models.VirtualInterface).\
                      filter_by(instance_id=instance_id).\
                      filter_by(network_id=network_id).\
                      options(joinedload('network')).\
                      options(joinedload('instance')).\
                      options(joinedload('fixed_ips')).\
                      first()
    return vif_ref


@require_admin_context
def virtual_interface_get_by_network(context, network_id):
    """Gets all virtual_interface on network.

    :param network_id: = network to retrieve vifs for
    """
    session = get_session()
    vif_refs = session.query(models.VirtualInterface).\
                       filter_by(network_id=network_id).\
                       options(joinedload('network')).\
                       options(joinedload('instance')).\
                       options(joinedload('fixed_ips')).\
                       all()
    return vif_refs


@require_context
def virtual_interface_delete(context, vif_id):
    """Delete virtual interface record from the database.

    :param vif_id: = id of vif to delete
    """
    session = get_session()
    vif_ref = virtual_interface_get(context, vif_id, session)
    with session.begin():
        session.delete(vif_ref)


@require_context
def virtual_interface_delete_by_instance(context, instance_id):
    """Delete virtual interface records that are associated
    with the instance given by instance_id.

    :param instance_id: = id of instance
    """
    vif_refs = virtual_interface_get_by_instance(context, instance_id)
    for vif_ref in vif_refs:
        virtual_interface_delete(context, vif_ref['id'])


###################


def _metadata_refs(metadata_dict, meta_class):
    metadata_refs = []
    if metadata_dict:
        for k, v in metadata_dict.iteritems():
            metadata_ref = meta_class()
            metadata_ref['key'] = k
            metadata_ref['value'] = v
            metadata_refs.append(metadata_ref)
    return metadata_refs


@require_context
def instance_create(context, values):
    """Create a new Instance record in the database.

    context - request context object
    values - dict containing column values.
    """
    values['metadata'] = _metadata_refs(values.get('metadata'),
                                        models.InstanceMetadata)

    instance_ref = models.Instance()
    instance_ref['uuid'] = str(utils.gen_uuid())

    instance_ref.update(values)

    session = get_session()
    with session.begin():
        instance_ref.save(session=session)
    return instance_ref


@require_admin_context
def instance_data_get_for_project(context, project_id):
    session = get_session()
    result = session.query(func.count(models.Instance.id),
                           func.sum(models.Instance.vcpus),
                           func.sum(models.Instance.memory_mb)).\
                     filter_by(project_id=project_id).\
                     filter_by(deleted=False).\
                     first()
    # NOTE(vish): convert None to 0
    return (result[0] or 0, result[1] or 0, result[2] or 0)


@require_context
def instance_destroy(context, instance_id):
    session = get_session()
    with session.begin():
        session.query(models.Instance).\
                filter_by(id=instance_id).\
                update({'deleted': True,
                        'deleted_at': utils.utcnow(),
                        'updated_at': literal_column('updated_at')})
        session.query(models.SecurityGroupInstanceAssociation).\
                filter_by(instance_id=instance_id).\
                update({'deleted': True,
                        'deleted_at': utils.utcnow(),
                        'updated_at': literal_column('updated_at')})
        session.query(models.InstanceMetadata).\
                filter_by(instance_id=instance_id).\
                update({'deleted': True,
                        'deleted_at': utils.utcnow(),
                        'updated_at': literal_column('updated_at')})


@require_context
def instance_stop(context, instance_id):
    session = get_session()
    with session.begin():
        session.query(models.Instance).\
                filter_by(id=instance_id).\
                update({'host': None,
                        'vm_state': vm_states.STOPPED,
                        'task_state': None,
                        'updated_at': literal_column('updated_at')})
        session.query(models.SecurityGroupInstanceAssociation).\
                filter_by(instance_id=instance_id).\
                update({'updated_at': literal_column('updated_at')})
        session.query(models.InstanceMetadata).\
                filter_by(instance_id=instance_id).\
                update({'updated_at': literal_column('updated_at')})


@require_context
def instance_get_by_uuid(context, uuid, session=None):
    partial = _build_instance_get(context, session=session)
    result = partial.filter_by(uuid=uuid)
    result = result.first()
    if not result:
        # FIXME(sirp): it would be nice if InstanceNotFound would accept a
        # uuid parameter as well
        raise exception.InstanceNotFound(instance_id=uuid)
    return result


@require_context
def instance_get(context, instance_id, session=None):
    partial = _build_instance_get(context, session=session)
    result = partial.filter_by(id=instance_id)
    result = result.first()
    if not result:
        raise exception.InstanceNotFound(instance_id=instance_id)
    return result


@require_context
def _build_instance_get(context, session=None):
    if not session:
        session = get_session()

    partial = session.query(models.Instance).\
                      options(joinedload_all('fixed_ips.floating_ips')).\
                      options(joinedload_all('fixed_ips.network')).\
                      options(joinedload('virtual_interfaces')).\
                      options(joinedload_all('security_groups.rules')).\
                      options(joinedload('volumes')).\
                      options(joinedload('metadata')).\
                      options(joinedload('instance_type'))

    if is_admin_context(context):
        partial = partial.filter_by(deleted=can_read_deleted(context))
    elif is_user_context(context):
        partial = partial.filter_by(project_id=context.project_id).\
                          filter_by(deleted=False)
    return partial


@require_admin_context
def instance_get_all(context):
    session = get_session()
    return session.query(models.Instance).\
                   options(joinedload_all('fixed_ips.floating_ips')).\
                   options(joinedload_all('virtual_interfaces.network')).\
                   options(joinedload_all(
                           'virtual_interfaces.fixed_ips.floating_ips')).\
                   options(joinedload('virtual_interfaces.instance')).\
                   options(joinedload('security_groups')).\
                   options(joinedload_all('fixed_ips.network')).\
                   options(joinedload('metadata')).\
                   options(joinedload('instance_type')).\
                   filter_by(deleted=can_read_deleted(context)).\
                   all()


@require_context
def instance_get_all_by_filters(context, filters):
    """Return instances that match all filters.  Deleted instances
    will be returned by default, unless there's a filter that says
    otherwise"""

    def _regexp_filter_by_ipv6(instance, filter_re):
        for interface in instance['virtual_interfaces']:
            fixed_ipv6 = interface.get('fixed_ipv6')
            if fixed_ipv6 and filter_re.match(fixed_ipv6):
                return True
        return False

    def _regexp_filter_by_ip(instance, filter_re):
        for interface in instance['virtual_interfaces']:
            for fixed_ip in interface['fixed_ips']:
                if not fixed_ip or not fixed_ip['address']:
                    continue
                if filter_re.match(fixed_ip['address']):
                    return True
                for floating_ip in fixed_ip.get('floating_ips', []):
                    if not floating_ip or not floating_ip['address']:
                        continue
                    if filter_re.match(floating_ip['address']):
                        return True
        return False

    def _regexp_filter_by_metadata(instance, meta):
        inst_metadata = [{node['key']: node['value']}
                         for node in instance['metadata']]
        if isinstance(meta, list):
            for node in meta:
                if node not in inst_metadata:
                    return False
        elif isinstance(meta, dict):
            for k, v in meta.iteritems():
                if {k: v} not in inst_metadata:
                    return False
        return True

    def _regexp_filter_by_column(instance, filter_name, filter_re):
        try:
            v = getattr(instance, filter_name)
        except AttributeError:
            return True
        if v and filter_re.match(str(v)):
            return True
        return False

    def _exact_match_filter(query, column, value):
        """Do exact match against a column.  value to match can be a list
        so you can match any value in the list.
        """
        if isinstance(value, list):
            column_attr = getattr(models.Instance, column)
            return query.filter(column_attr.in_(value))
        else:
            filter_dict = {}
            filter_dict[column] = value
            return query.filter_by(**filter_dict)

    session = get_session()
    query_prefix = session.query(models.Instance).\
                   options(joinedload_all('fixed_ips.floating_ips')).\
                   options(joinedload_all('virtual_interfaces.network')).\
                   options(joinedload_all(
                           'virtual_interfaces.fixed_ips.floating_ips')).\
                   options(joinedload('virtual_interfaces.instance')).\
                   options(joinedload('security_groups')).\
                   options(joinedload_all('fixed_ips.network')).\
                   options(joinedload('metadata')).\
                   options(joinedload('instance_type')).\
                   order_by(desc(models.Instance.created_at))

    # Make a copy of the filters dictionary to use going forward, as we'll
    # be modifying it and we shouldn't affect the caller's use of it.
    filters = filters.copy()

    if 'changes-since' in filters:
        changes_since = filters['changes-since']
        query_prefix = query_prefix.\
                filter(models.Instance.updated_at > changes_since)

    if not context.is_admin:
        # If we're not admin context, add appropriate filter..
        if context.project_id:
            filters['project_id'] = context.project_id
        else:
            filters['user_id'] = context.user_id

    # Filters for exact matches that we can do along with the SQL query...
    # For other filters that don't match this, we will do regexp matching
    exact_match_filter_names = ['project_id', 'user_id', 'image_ref',
                                'vm_state', 'instance_type_id', 'deleted']

    query_filters = [key for key in filters.iterkeys()
                     if key in exact_match_filter_names]

    for filter_name in query_filters:
        # Do the matching and remove the filter from the dictionary
        # so we don't try it again below..
        query_prefix = _exact_match_filter(query_prefix, filter_name,
                                           filters.pop(filter_name))

    instances = query_prefix.all()

    if not instances:
        return []

    # Now filter on everything else for regexp matching..
    # For filters not in the list, we'll attempt to use the filter_name
    # as a column name in Instance..
    regexp_filter_funcs = {'ip6': _regexp_filter_by_ipv6,
                           'ip': _regexp_filter_by_ip}

    for filter_name in filters.iterkeys():
        filter_func = regexp_filter_funcs.get(filter_name, None)
        filter_re = re.compile(str(filters[filter_name]))
        if filter_func:
            filter_l = lambda instance: filter_func(instance, filter_re)
        elif filter_name == 'metadata':
            filter_l = lambda instance: _regexp_filter_by_metadata(instance,
                    filters[filter_name])
        else:
            filter_l = lambda instance: _regexp_filter_by_column(instance,
                    filter_name, filter_re)
        instances = filter(filter_l, instances)

    return instances


@require_context
def instance_get_active_by_window(context, begin, end=None, project_id=None):
    """Return instances that were continuously active over window."""
    session = get_session()
    query = session.query(models.Instance).\
                    filter(models.Instance.launched_at < begin)
    if end:
        query = query.filter(or_(models.Instance.terminated_at == None,
                                 models.Instance.terminated_at > end))
    else:
        query = query.filter(models.Instance.terminated_at == None)
    if project_id:
        query = query.filter_by(project_id=project_id)
    return query.all()


@require_admin_context
def instance_get_active_by_window_joined(context, begin, end=None,
                                         project_id=None):
    """Return instances and joins that were continuously active over
    window."""
    session = get_session()
    query = session.query(models.Instance).\
                    options(joinedload_all('fixed_ips.floating_ips')).\
                    options(joinedload('security_groups')).\
                    options(joinedload_all('fixed_ips.network')).\
                    options(joinedload('instance_type')).\
                    filter(models.Instance.launched_at < begin)
    if end:
        query = query.filter(or_(models.Instance.terminated_at == None,
                                 models.Instance.terminated_at > end))
    else:
        query = query.filter(models.Instance.terminated_at == None)
    if project_id:
        query = query.filter_by(project_id=project_id)
    return query.all()


@require_admin_context
def instance_get_all_by_user(context, user_id):
    session = get_session()
    return session.query(models.Instance).\
                   options(joinedload_all('fixed_ips.floating_ips')).\
                   options(joinedload('virtual_interfaces')).\
                   options(joinedload('security_groups')).\
                   options(joinedload_all('fixed_ips.network')).\
                   options(joinedload('metadata')).\
                   options(joinedload('instance_type')).\
                   filter_by(deleted=can_read_deleted(context)).\
                   filter_by(user_id=user_id).\
                   all()


@require_admin_context
def instance_get_all_by_host(context, host):
    session = get_session()
    return session.query(models.Instance).\
                   options(joinedload_all('fixed_ips.floating_ips')).\
                   options(joinedload('virtual_interfaces')).\
                   options(joinedload('security_groups')).\
                   options(joinedload_all('fixed_ips.network')).\
                   options(joinedload('metadata')).\
                   options(joinedload('instance_type')).\
                   filter_by(host=host).\
                   filter_by(deleted=can_read_deleted(context)).\
                   all()


@require_context
def instance_get_all_by_project(context, project_id):
    authorize_project_context(context, project_id)

    session = get_session()
    return session.query(models.Instance).\
                   options(joinedload_all('fixed_ips.floating_ips')).\
                   options(joinedload('virtual_interfaces')).\
                   options(joinedload('security_groups')).\
                   options(joinedload_all('fixed_ips.network')).\
                   options(joinedload('metadata')).\
                   options(joinedload('instance_type')).\
                   filter_by(project_id=project_id).\
                   filter_by(deleted=can_read_deleted(context)).\
                   all()


@require_context
def instance_get_all_by_reservation(context, reservation_id):
    session = get_session()
    query = session.query(models.Instance).\
                    filter_by(reservation_id=reservation_id).\
                    options(joinedload_all('fixed_ips.floating_ips')).\
                    options(joinedload('virtual_interfaces')).\
                    options(joinedload('security_groups')).\
                    options(joinedload_all('fixed_ips.network')).\
                    options(joinedload('metadata')).\
                    options(joinedload('instance_type'))

    if is_admin_context(context):
        return query.\
                filter_by(deleted=can_read_deleted(context)).\
                all()
    elif is_user_context(context):
        return query.\
                filter_by(project_id=context.project_id).\
                filter_by(deleted=False).\
                all()


@require_context
def instance_get_by_fixed_ip(context, address):
    """Return instance ref by exact match of FixedIP"""
    fixed_ip_ref = fixed_ip_get_by_address(context, address)
    return fixed_ip_ref.instance


@require_context
def instance_get_by_fixed_ipv6(context, address):
    """Return instance ref by exact match of IPv6"""
    session = get_session()

    # convert IPv6 address to mac
    mac = ipv6.to_mac(address)

    # get virtual interface
    vif_ref = virtual_interface_get_by_address(context, mac)

    # look up instance based on instance_id from vif row
    result = session.query(models.Instance).\
                     filter_by(id=vif_ref['instance_id'])
    return result


@require_admin_context
def instance_get_project_vpn(context, project_id):
    session = get_session()
    return session.query(models.Instance).\
                   options(joinedload_all('fixed_ips.floating_ips')).\
                   options(joinedload('virtual_interfaces')).\
                   options(joinedload('security_groups')).\
                   options(joinedload_all('fixed_ips.network')).\
                   options(joinedload('metadata')).\
                   options(joinedload('instance_type')).\
                   filter_by(project_id=project_id).\
                   filter_by(image_ref=str(FLAGS.vpn_image_id)).\
                   filter_by(deleted=can_read_deleted(context)).\
                   first()


@require_context
def instance_get_fixed_addresses(context, instance_id):
    session = get_session()
    with session.begin():
        instance_ref = instance_get(context, instance_id, session=session)
        try:
            fixed_ips = fixed_ip_get_by_instance(context, instance_id)
        except exception.NotFound:
            return []
        return [fixed_ip.address for fixed_ip in fixed_ips]


@require_context
def instance_get_fixed_addresses_v6(context, instance_id):
    session = get_session()
    with session.begin():
        # get instance
        instance_ref = instance_get(context, instance_id, session=session)
        # assume instance has 1 mac for each network associated with it
        # get networks associated with instance
        network_refs = network_get_all_by_instance(context, instance_id)
        # compile a list of cidr_v6 prefixes sorted by network id
        prefixes = [ref.cidr_v6 for ref in
                    sorted(network_refs, key=lambda ref: ref.id)]
        # get vifs associated with instance
        vif_refs = virtual_interface_get_by_instance(context, instance_ref.id)
        # compile list of the mac_addresses for vifs sorted by network id
        macs = [vif_ref['address'] for vif_ref in
                sorted(vif_refs, key=lambda vif_ref: vif_ref['network_id'])]
        # get project id from instance
        project_id = instance_ref.project_id
        # combine prefixes, macs, and project_id into (prefix,mac,p_id) tuples
        prefix_mac_tuples = zip(prefixes, macs, [project_id for m in macs])
        # return list containing ipv6 address for each tuple
        return [ipv6.to_global(*t) for t in prefix_mac_tuples]


@require_context
def instance_get_floating_address(context, instance_id):
    fixed_ip_refs = fixed_ip_get_by_instance(context, instance_id)
    if not fixed_ip_refs:
        return None
    # NOTE(tr3buchet): this only gets the first fixed_ip
    #                  won't find floating ips associated with other fixed_ips
    if not fixed_ip_refs[0].floating_ips:
        return None
    # NOTE(vish): this just returns the first floating ip
    return fixed_ip_refs[0].floating_ips[0]['address']


@require_context
def instance_update(context, instance_id, values):
    session = get_session()
    metadata = values.get('metadata')
    if metadata is not None:
        instance_metadata_update(context,
                                 instance_id,
                                 values.pop('metadata'),
                                 delete=True)
    with session.begin():
        if utils.is_uuid_like(instance_id):
            instance_ref = instance_get_by_uuid(context, instance_id,
                                                session=session)
        else:
            instance_ref = instance_get(context, instance_id, session=session)
        instance_ref.update(values)
        instance_ref.save(session=session)
        return instance_ref


def instance_add_security_group(context, instance_id, security_group_id):
    """Associate the given security group with the given instance"""
    session = get_session()
    with session.begin():
        instance_ref = instance_get(context, instance_id, session=session)
        security_group_ref = security_group_get(context,
                                                security_group_id,
                                                session=session)
        instance_ref.security_groups += [security_group_ref]
        instance_ref.save(session=session)


@require_context
def instance_remove_security_group(context, instance_id, security_group_id):
    """Disassociate the given security group from the given instance"""
    session = get_session()

    session.query(models.SecurityGroupInstanceAssociation).\
            filter_by(instance_id=instance_id).\
            filter_by(security_group_id=security_group_id).\
            update({'deleted': True,
                    'deleted_at': utils.utcnow(),
                    'updated_at': literal_column('updated_at')})


@require_context
def instance_action_create(context, values):
    """Create an instance action from the values dictionary."""
    action_ref = models.InstanceActions()
    action_ref.update(values)

    session = get_session()
    with session.begin():
        action_ref.save(session=session)
    return action_ref


@require_admin_context
def instance_get_actions(context, instance_id):
    """Return the actions associated to the given instance id"""
    session = get_session()

    if utils.is_uuid_like(instance_id):
        instance = instance_get_by_uuid(context, instance_id, session)
        instance_id = instance.id

    return session.query(models.InstanceActions).\
                   filter_by(instance_id=instance_id).\
                   all()


###################


@require_context
def key_pair_create(context, values):
    key_pair_ref = models.KeyPair()
    key_pair_ref.update(values)
    key_pair_ref.save()
    return key_pair_ref


@require_context
def key_pair_destroy(context, user_id, name):
    authorize_user_context(context, user_id)
    session = get_session()
    with session.begin():
        key_pair_ref = key_pair_get(context, user_id, name, session=session)
        key_pair_ref.delete(session=session)


@require_context
def key_pair_destroy_all_by_user(context, user_id):
    authorize_user_context(context, user_id)
    session = get_session()
    with session.begin():
        session.query(models.KeyPair).\
                filter_by(user_id=user_id).\
                update({'deleted': True,
                        'deleted_at': utils.utcnow(),
                        'updated_at': literal_column('updated_at')})


@require_context
def key_pair_get(context, user_id, name, session=None):
    authorize_user_context(context, user_id)

    if not session:
        session = get_session()

    result = session.query(models.KeyPair).\
                     filter_by(user_id=user_id).\
                     filter_by(name=name).\
                     filter_by(deleted=can_read_deleted(context)).\
                     first()
    if not result:
        raise exception.KeypairNotFound(user_id=user_id, name=name)
    return result


@require_context
def key_pair_get_all_by_user(context, user_id):
    authorize_user_context(context, user_id)
    session = get_session()
    return session.query(models.KeyPair).\
                   filter_by(user_id=user_id).\
                   filter_by(deleted=False).\
                   all()


###################


@require_admin_context
def network_associate(context, project_id, force=False):
    """Associate a project with a network.

    called by project_get_networks under certain conditions
    and network manager add_network_to_project()

    only associate if the project doesn't already have a network
    or if force is True

    force solves race condition where a fresh project has multiple instance
    builds simultaneously picked up by multiple network hosts which attempt
    to associate the project with multiple networks
    force should only be used as a direct consequence of user request
    all automated requests should not use force
    """
    session = get_session()
    with session.begin():

        def network_query(project_filter):
            return session.query(models.Network).\
                           filter_by(deleted=False).\
                           filter_by(project_id=project_filter).\
                           with_lockmode('update').\
                           first()

        if not force:
            # find out if project has a network
            network_ref = network_query(project_id)

        if force or not network_ref:
            # in force mode or project doesn't have a network so associate
            # with a new network

            # get new network
            network_ref = network_query(None)
            if not network_ref:
                raise db.NoMoreNetworks()

            # associate with network
            # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
            #             then this has concurrency issues
            network_ref['project_id'] = project_id
            session.add(network_ref)
    return network_ref


@require_admin_context
def network_count(context):
    session = get_session()
    return session.query(models.Network).\
                   filter_by(deleted=can_read_deleted(context)).\
                   count()


@require_admin_context
def network_count_allocated_ips(context, network_id):
    session = get_session()
    return session.query(models.FixedIp).\
                   filter_by(network_id=network_id).\
                   filter_by(allocated=True).\
                   filter_by(deleted=False).\
                   count()


@require_admin_context
def network_count_available_ips(context, network_id):
    session = get_session()
    return session.query(models.FixedIp).\
                   filter_by(network_id=network_id).\
                   filter_by(allocated=False).\
                   filter_by(reserved=False).\
                   filter_by(deleted=False).\
                   count()


@require_admin_context
def network_count_reserved_ips(context, network_id):
    session = get_session()
    return session.query(models.FixedIp).\
                   filter_by(network_id=network_id).\
                   filter_by(reserved=True).\
                   filter_by(deleted=False).\
                   count()


@require_admin_context
def network_create_safe(context, values):
    network_ref = models.Network()
    network_ref['uuid'] = str(utils.gen_uuid())
    network_ref.update(values)

    try:
        network_ref.save()
        return network_ref
    except IntegrityError:
        return None


@require_admin_context
def network_delete_safe(context, network_id):
    session = get_session()
    with session.begin():
        network_ref = network_get(context, network_id=network_id,
                                  session=session)
        session.delete(network_ref)


@require_admin_context
def network_disassociate(context, network_id):
    network_update(context, network_id, {'project_id': None,
                                         'host': None})


@require_admin_context
def network_disassociate_all(context):
    session = get_session()
    session.query(models.Network).\
            update({'project_id': None,
                    'updated_at': literal_column('updated_at')})


@require_context
def network_get(context, network_id, session=None):
    if not session:
        session = get_session()
    result = None

    if is_admin_context(context):
        result = session.query(models.Network).\
                         filter_by(id=network_id).\
                         filter_by(deleted=can_read_deleted(context)).\
                         first()
    elif is_user_context(context):
        result = session.query(models.Network).\
                         filter_by(project_id=context.project_id).\
                         filter_by(id=network_id).\
                         filter_by(deleted=False).\
                         first()

    if not result:
        raise exception.NetworkNotFound(network_id=network_id)

    return result


@require_admin_context
def network_get_all(context):
    session = get_session()
    result = session.query(models.Network).\
                     filter_by(deleted=False).all()
    if not result:
        raise exception.NoNetworksFound()
    return result


@require_admin_context
def network_get_all_by_uuids(context, network_uuids, project_id=None):
    session = get_session()
    project_or_none = or_(models.Network.project_id == project_id,
                          models.Network.project_id == None)
    result = session.query(models.Network).\
                     filter(models.Network.uuid.in_(network_uuids)).\
                     filter(project_or_none).\
                     filter_by(deleted=False).all()
    if not result:
        raise exception.NoNetworksFound()

    # check if host is set to all of the networks
    # returned in the result
    for network in result:
        if network['host'] is None:
            raise exception.NetworkHostNotSet(network_id=network['id'])

    # check if the result contains all the networks
    # we are looking for
    for network_uuid in network_uuids:
        found = False
        for network in result:
            if network['uuid'] == network_uuid:
                found = True
                break
        if not found:
            if project_id:
                raise exception.NetworkNotFoundForProject(
                        network_uuid=network_uuid,
                        project_id=context.project_id)
            raise exception.NetworkNotFound(network_id=network_uuid)

    return result


# NOTE(vish): pylint complains because of the long method name, but
#             it fits with the names of the rest of the methods
# pylint: disable=C0103


@require_admin_context
def network_get_associated_fixed_ips(context, network_id):
    session = get_session()
    return session.query(models.FixedIp).\
                   options(joinedload_all('instance')).\
                   filter_by(network_id=network_id).\
                   filter(models.FixedIp.instance_id != None).\
                   filter(models.FixedIp.virtual_interface_id != None).\
                   filter_by(deleted=False).\
                   all()


@require_admin_context
def network_get_by_bridge(context, bridge):
    session = get_session()
    result = session.query(models.Network).\
                     filter_by(bridge=bridge).\
                     filter_by(deleted=False).\
                     first()

    if not result:
        raise exception.NetworkNotFoundForBridge(bridge=bridge)
    return result


@require_admin_context
def network_get_by_uuid(context, uuid):
    session = get_session()
    result = session.query(models.Network).\
                     filter_by(uuid=uuid).\
                     filter_by(deleted=False).\
                     first()

    if not result:
        raise exception.NetworkNotFoundForUUID(uuid=uuid)
    return result


@require_admin_context
def network_get_by_cidr(context, cidr):
    session = get_session()
    result = session.query(models.Network).\
                     filter(or_(models.Network.cidr == cidr,
                                models.Network.cidr_v6 == cidr)).\
                     filter_by(deleted=False).\
                     first()

    if not result:
        raise exception.NetworkNotFoundForCidr(cidr=cidr)
    return result


@require_admin_context
def network_get_by_instance(_context, instance_id):
    # note this uses fixed IP to get to instance
    # only works for networks the instance has an IP from
    session = get_session()
    rv = session.query(models.Network).\
                 filter_by(deleted=False).\
                 join(models.Network.fixed_ips).\
                 filter_by(instance_id=instance_id).\
                 filter_by(deleted=False).\
                 first()
    if not rv:
        raise exception.NetworkNotFoundForInstance(instance_id=instance_id)
    return rv


@require_admin_context
def network_get_all_by_instance(_context, instance_id):
    session = get_session()
    rv = session.query(models.Network).\
                 filter_by(deleted=False).\
                 join(models.Network.fixed_ips).\
                 filter_by(instance_id=instance_id).\
                 filter_by(deleted=False).\
                 all()
    if not rv:
        raise exception.NetworkNotFoundForInstance(instance_id=instance_id)
    return rv


@require_admin_context
def network_get_all_by_host(context, host):
    session = get_session()
    with session.begin():
        # NOTE(vish): return networks that have host set
        #             or that have a fixed ip with host set
        host_filter = or_(models.Network.host == host,
                          models.FixedIp.host == host)

        return session.query(models.Network).\
                       filter_by(deleted=False).\
                       join(models.Network.fixed_ips).\
                       filter(host_filter).\
                       filter_by(deleted=False).\
                       all()


@require_admin_context
def network_set_host(context, network_id, host_id):
    session = get_session()
    with session.begin():
        network_ref = session.query(models.Network).\
                              filter_by(id=network_id).\
                              filter_by(deleted=False).\
                              with_lockmode('update').\
                              first()
        if not network_ref:
            raise exception.NetworkNotFound(network_id=network_id)

        # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
        #             then this has concurrency issues
        if not network_ref['host']:
            network_ref['host'] = host_id
            session.add(network_ref)

    return network_ref['host']


@require_context
def network_update(context, network_id, values):
    session = get_session()
    with session.begin():
        network_ref = network_get(context, network_id, session=session)
        network_ref.update(values)
        network_ref.save(session=session)
        return network_ref


###################


def queue_get_for(_context, topic, physical_node_id):
    # FIXME(ja): this should be servername?
    return "%s.%s" % (topic, physical_node_id)


###################


@require_admin_context
def export_device_count(context):
    session = get_session()
    return session.query(models.ExportDevice).\
                   filter_by(deleted=can_read_deleted(context)).\
                   count()


@require_admin_context
def export_device_create_safe(context, values):
    export_device_ref = models.ExportDevice()
    export_device_ref.update(values)
    try:
        export_device_ref.save()
        return export_device_ref
    except IntegrityError:
        return None


###################


@require_admin_context
def iscsi_target_count_by_host(context, host):
    session = get_session()
    return session.query(models.IscsiTarget).\
                   filter_by(deleted=can_read_deleted(context)).\
                   filter_by(host=host).\
                   count()


@require_admin_context
def iscsi_target_create_safe(context, values):
    iscsi_target_ref = models.IscsiTarget()
    for (key, value) in values.iteritems():
        iscsi_target_ref[key] = value
    try:
        iscsi_target_ref.save()
        return iscsi_target_ref
    except IntegrityError:
        return None


###################


@require_admin_context
def auth_token_destroy(context, token_id):
    session = get_session()
    with session.begin():
        token_ref = auth_token_get(context, token_id, session=session)
        token_ref.delete(session=session)


@require_admin_context
def auth_token_get(context, token_hash, session=None):
    if session is None:
        session = get_session()
    tk = session.query(models.AuthToken).\
                 filter_by(token_hash=token_hash).\
                 filter_by(deleted=can_read_deleted(context)).\
                 first()
    if not tk:
        raise exception.AuthTokenNotFound(token=token_hash)
    return tk


@require_admin_context
def auth_token_update(context, token_hash, values):
    session = get_session()
    with session.begin():
        token_ref = auth_token_get(context, token_hash, session=session)
        token_ref.update(values)
        token_ref.save(session=session)


@require_admin_context
def auth_token_create(_context, token):
    tk = models.AuthToken()
    tk.update(token)
    tk.save()
    return tk


###################


@require_context
def quota_get(context, project_id, resource, session=None):
    if not session:
        session = get_session()
    result = session.query(models.Quota).\
                     filter_by(project_id=project_id).\
                     filter_by(resource=resource).\
                     filter_by(deleted=False).\
                     first()
    if not result:
        raise exception.ProjectQuotaNotFound(project_id=project_id)
    return result


@require_context
def quota_get_all_by_project(context, project_id):
    authorize_project_context(context, project_id)
    session = get_session()
    result = {'project_id': project_id}
    rows = session.query(models.Quota).\
                   filter_by(project_id=project_id).\
                   filter_by(deleted=False).\
                   all()
    for row in rows:
        result[row.resource] = row.hard_limit
    return result


@require_admin_context
def quota_create(context, project_id, resource, limit):
    quota_ref = models.Quota()
    quota_ref.project_id = project_id
    quota_ref.resource = resource
    quota_ref.hard_limit = limit
    quota_ref.save()
    return quota_ref


@require_admin_context
def quota_update(context, project_id, resource, limit):
    session = get_session()
    with session.begin():
        quota_ref = quota_get(context, project_id, resource, session=session)
        quota_ref.hard_limit = limit
        quota_ref.save(session=session)


@require_admin_context
def quota_destroy(context, project_id, resource):
    session = get_session()
    with session.begin():
        quota_ref = quota_get(context, project_id, resource, session=session)
        quota_ref.delete(session=session)


@require_admin_context
def quota_destroy_all_by_project(context, project_id):
    session = get_session()
    with session.begin():
        quotas = session.query(models.Quota).\
                         filter_by(project_id=project_id).\
                         filter_by(deleted=False).\
all() for quota_ref in quotas: quota_ref.delete(session=session) ################### @require_admin_context def volume_allocate_shelf_and_blade(context, volume_id): session = get_session() with session.begin(): export_device = session.query(models.ExportDevice).\ filter_by(volume=None).\ filter_by(deleted=False).\ with_lockmode('update').\ first() # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues if not export_device: raise db.NoMoreBlades() export_device.volume_id = volume_id session.add(export_device) return (export_device.shelf_id, export_device.blade_id) @require_admin_context def volume_allocate_iscsi_target(context, volume_id, host): session = get_session() with session.begin(): iscsi_target_ref = session.query(models.IscsiTarget).\ filter_by(volume=None).\ filter_by(host=host).\ filter_by(deleted=False).\ with_lockmode('update').\ first() # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues if not iscsi_target_ref: raise db.NoMoreTargets() iscsi_target_ref.volume_id = volume_id session.add(iscsi_target_ref) return iscsi_target_ref.target_num @require_admin_context def volume_attached(context, volume_id, instance_id, mountpoint): session = get_session() with session.begin(): volume_ref = volume_get(context, volume_id, session=session) volume_ref['status'] = 'in-use' volume_ref['mountpoint'] = mountpoint volume_ref['attach_status'] = 'attached' volume_ref.instance = instance_get(context, instance_id, session=session) volume_ref.save(session=session) @require_context def volume_create(context, values): values['volume_metadata'] = _metadata_refs(values.get('metadata'), models.VolumeMetadata) volume_ref = models.Volume() volume_ref.update(values) session = get_session() with session.begin(): volume_ref.save(session=session) return volume_ref @require_admin_context def volume_data_get_for_project(context, project_id): session = get_session() result = session.query(func.count(models.Volume.id), func.sum(models.Volume.size)).\ filter_by(project_id=project_id).\ filter_by(deleted=False).\ first() # NOTE(vish): convert None to 0 return (result[0] or 0, result[1] or 0) @require_admin_context def volume_destroy(context, volume_id): session = get_session() with session.begin(): session.query(models.Volume).\ filter_by(id=volume_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.ExportDevice).\ filter_by(volume_id=volume_id).\ update({'volume_id': None}) session.query(models.IscsiTarget).\ filter_by(volume_id=volume_id).\ update({'volume_id': None}) session.query(models.VolumeMetadata).\ filter_by(volume_id=volume_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_admin_context def volume_detached(context, volume_id): session = get_session() with session.begin(): volume_ref = volume_get(context, volume_id, session=session) volume_ref['status'] = 'available' volume_ref['mountpoint'] = None volume_ref['attach_status'] = 'detached' volume_ref.instance = None volume_ref.save(session=session) @require_context def volume_get(context, volume_id, session=None): if not session: session = get_session() result = None if is_admin_context(context): result = session.query(models.Volume).\ options(joinedload('instance')).\ options(joinedload('volume_metadata')).\ options(joinedload('volume_type')).\ filter_by(id=volume_id).\ filter_by(deleted=can_read_deleted(context)).\ first() elif 
is_user_context(context): result = session.query(models.Volume).\ options(joinedload('instance')).\ options(joinedload('volume_metadata')).\ options(joinedload('volume_type')).\ filter_by(project_id=context.project_id).\ filter_by(id=volume_id).\ filter_by(deleted=False).\ first() if not result: raise exception.VolumeNotFound(volume_id=volume_id) return result @require_admin_context def volume_get_all(context): session = get_session() return session.query(models.Volume).\ options(joinedload('instance')).\ options(joinedload('volume_metadata')).\ options(joinedload('volume_type')).\ filter_by(deleted=can_read_deleted(context)).\ all() @require_admin_context def volume_get_all_by_host(context, host): session = get_session() return session.query(models.Volume).\ options(joinedload('instance')).\ options(joinedload('volume_metadata')).\ options(joinedload('volume_type')).\ filter_by(host=host).\ filter_by(deleted=can_read_deleted(context)).\ all() @require_admin_context def volume_get_all_by_instance(context, instance_id): session = get_session() result = session.query(models.Volume).\ options(joinedload('volume_metadata')).\ options(joinedload('volume_type')).\ filter_by(instance_id=instance_id).\ filter_by(deleted=False).\ all() if not result: raise exception.VolumeNotFoundForInstance(instance_id=instance_id) return result @require_context def volume_get_all_by_project(context, project_id): authorize_project_context(context, project_id) session = get_session() return session.query(models.Volume).\ options(joinedload('instance')).\ options(joinedload('volume_metadata')).\ options(joinedload('volume_type')).\ filter_by(project_id=project_id).\ filter_by(deleted=can_read_deleted(context)).\ all() @require_admin_context def volume_get_instance(context, volume_id): session = get_session() result = session.query(models.Volume).\ filter_by(id=volume_id).\ filter_by(deleted=can_read_deleted(context)).\ options(joinedload('instance')).\ options(joinedload('volume_metadata')).\ options(joinedload('volume_type')).\ first() if not result: raise exception.VolumeNotFound(volume_id=volume_id) return result.instance @require_admin_context def volume_get_shelf_and_blade(context, volume_id): session = get_session() result = session.query(models.ExportDevice).\ filter_by(volume_id=volume_id).\ first() if not result: raise exception.ExportDeviceNotFoundForVolume(volume_id=volume_id) return (result.shelf_id, result.blade_id) @require_admin_context def volume_get_iscsi_target_num(context, volume_id): session = get_session() result = session.query(models.IscsiTarget).\ filter_by(volume_id=volume_id).\ first() if not result: raise exception.ISCSITargetNotFoundForVolume(volume_id=volume_id) return result.target_num @require_context def volume_update(context, volume_id, values): session = get_session() metadata = values.get('metadata') if metadata is not None: volume_metadata_update(context, volume_id, values.pop('metadata'), delete=True) with session.begin(): volume_ref = volume_get(context, volume_id, session=session) volume_ref.update(values) volume_ref.save(session=session) #################### @require_context @require_volume_exists def volume_metadata_get(context, volume_id): session = get_session() meta_results = session.query(models.VolumeMetadata).\ filter_by(volume_id=volume_id).\ filter_by(deleted=False).\ all() meta_dict = {} for i in meta_results: meta_dict[i['key']] = i['value'] return meta_dict @require_context @require_volume_exists def volume_metadata_delete(context, volume_id, key): session = 
get_session() session.query(models.VolumeMetadata).\ filter_by(volume_id=volume_id).\ filter_by(key=key).\ filter_by(deleted=False).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context @require_volume_exists def volume_metadata_delete_all(context, volume_id): session = get_session() session.query(models.VolumeMetadata).\ filter_by(volume_id=volume_id).\ filter_by(deleted=False).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context @require_volume_exists def volume_metadata_get_item(context, volume_id, key, session=None): if not session: session = get_session() meta_result = session.query(models.VolumeMetadata).\ filter_by(volume_id=volume_id).\ filter_by(key=key).\ filter_by(deleted=False).\ first() if not meta_result: raise exception.VolumeMetadataNotFound(metadata_key=key, volume_id=volume_id) return meta_result @require_context @require_volume_exists def volume_metadata_update(context, volume_id, metadata, delete): session = get_session() # Set existing metadata to deleted if delete argument is True if delete: original_metadata = volume_metadata_get(context, volume_id) for meta_key, meta_value in original_metadata.iteritems(): if meta_key not in metadata: meta_ref = volume_metadata_get_item(context, volume_id, meta_key, session) meta_ref.update({'deleted': True}) meta_ref.save(session=session) meta_ref = None # Now update all existing items with new values, or create new meta objects for meta_key, meta_value in metadata.iteritems(): # update the value whether it exists or not item = {"value": meta_value} try: meta_ref = volume_metadata_get_item(context, volume_id, meta_key, session) except exception.VolumeMetadataNotFound, e: meta_ref = models.VolumeMetadata() item.update({"key": meta_key, "volume_id": volume_id}) meta_ref.update(item) meta_ref.save(session=session) return metadata ################### @require_context def snapshot_create(context, values): snapshot_ref = models.Snapshot() snapshot_ref.update(values) session = get_session() with session.begin(): snapshot_ref.save(session=session) return snapshot_ref @require_admin_context def snapshot_destroy(context, snapshot_id): session = get_session() with session.begin(): session.query(models.Snapshot).\ filter_by(id=snapshot_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def snapshot_get(context, snapshot_id, session=None): if not session: session = get_session() result = None if is_admin_context(context): result = session.query(models.Snapshot).\ filter_by(id=snapshot_id).\ filter_by(deleted=can_read_deleted(context)).\ first() elif is_user_context(context): result = session.query(models.Snapshot).\ filter_by(project_id=context.project_id).\ filter_by(id=snapshot_id).\ filter_by(deleted=False).\ first() if not result: raise exception.SnapshotNotFound(snapshot_id=snapshot_id) return result @require_admin_context def snapshot_get_all(context): session = get_session() return session.query(models.Snapshot).\ filter_by(deleted=can_read_deleted(context)).\ all() @require_context def snapshot_get_all_by_project(context, project_id): authorize_project_context(context, project_id) session = get_session() return session.query(models.Snapshot).\ filter_by(project_id=project_id).\ filter_by(deleted=can_read_deleted(context)).\ all() @require_context def snapshot_update(context, snapshot_id, values): session = get_session() with 
session.begin(): snapshot_ref = snapshot_get(context, snapshot_id, session=session) snapshot_ref.update(values) snapshot_ref.save(session=session) ################### @require_context def block_device_mapping_create(context, values): bdm_ref = models.BlockDeviceMapping() bdm_ref.update(values) session = get_session() with session.begin(): bdm_ref.save(session=session) @require_context def block_device_mapping_update(context, bdm_id, values): session = get_session() with session.begin(): session.query(models.BlockDeviceMapping).\ filter_by(id=bdm_id).\ filter_by(deleted=False).\ update(values) @require_context def block_device_mapping_update_or_create(context, values): session = get_session() with session.begin(): result = session.query(models.BlockDeviceMapping).\ filter_by(instance_id=values['instance_id']).\ filter_by(device_name=values['device_name']).\ filter_by(deleted=False).\ first() if not result: bdm_ref = models.BlockDeviceMapping() bdm_ref.update(values) bdm_ref.save(session=session) else: result.update(values) # NOTE(yamahata): same virtual device name can be specified multiple # times. So delete the existing ones. virtual_name = values['virtual_name'] if (virtual_name is not None and block_device.is_swap_or_ephemeral(virtual_name)): session.query(models.BlockDeviceMapping).\ filter_by(instance_id=values['instance_id']).\ filter_by(virtual_name=virtual_name).\ filter(models.BlockDeviceMapping.device_name != values['device_name']).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def block_device_mapping_get_all_by_instance(context, instance_id): session = get_session() result = session.query(models.BlockDeviceMapping).\ filter_by(instance_id=instance_id).\ filter_by(deleted=False).\ all() if not result: return [] return result @require_context def block_device_mapping_destroy(context, bdm_id): session = get_session() with session.begin(): session.query(models.BlockDeviceMapping).\ filter_by(id=bdm_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def block_device_mapping_destroy_by_instance_and_volume(context, instance_id, volume_id): session = get_session() with session.begin(): session.query(models.BlockDeviceMapping).\ filter_by(instance_id=instance_id).\ filter_by(volume_id=volume_id).\ filter_by(deleted=False).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) ################### @require_context def security_group_get_all(context): session = get_session() return session.query(models.SecurityGroup).\ filter_by(deleted=can_read_deleted(context)).\ options(joinedload_all('rules')).\ all() @require_context def security_group_get(context, security_group_id, session=None): if not session: session = get_session() if is_admin_context(context): result = session.query(models.SecurityGroup).\ filter_by(deleted=can_read_deleted(context),).\ filter_by(id=security_group_id).\ options(joinedload_all('rules')).\ options(joinedload_all('instances')).\ first() else: result = session.query(models.SecurityGroup).\ filter_by(deleted=False).\ filter_by(id=security_group_id).\ filter_by(project_id=context.project_id).\ options(joinedload_all('rules')).\ options(joinedload_all('instances')).\ first() if not result: raise exception.SecurityGroupNotFound( security_group_id=security_group_id) return result @require_context def security_group_get_by_name(context, project_id, group_name): session = 
get_session() result = session.query(models.SecurityGroup).\ filter_by(project_id=project_id).\ filter_by(name=group_name).\ filter_by(deleted=False).\ options(joinedload_all('rules')).\ options(joinedload_all('instances')).\ first() if not result: raise exception.SecurityGroupNotFoundForProject(project_id=project_id, security_group_id=group_name) return result @require_context def security_group_get_by_project(context, project_id): session = get_session() return session.query(models.SecurityGroup).\ filter_by(project_id=project_id).\ filter_by(deleted=False).\ options(joinedload_all('rules')).\ all() @require_context def security_group_get_by_instance(context, instance_id): session = get_session() return session.query(models.SecurityGroup).\ filter_by(deleted=False).\ options(joinedload_all('rules')).\ join(models.SecurityGroup.instances).\ filter_by(id=instance_id).\ filter_by(deleted=False).\ all() @require_context def security_group_exists(context, project_id, group_name): try: group = security_group_get_by_name(context, project_id, group_name) return group is not None except exception.NotFound: return False @require_context def security_group_create(context, values): security_group_ref = models.SecurityGroup() # FIXME(devcamcar): Unless I do this, rules fails with lazy load exception # once save() is called. This will get cleaned up in next orm pass. security_group_ref.rules security_group_ref.update(values) security_group_ref.save() return security_group_ref @require_context def security_group_destroy(context, security_group_id): session = get_session() with session.begin(): session.query(models.SecurityGroup).\ filter_by(id=security_group_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupInstanceAssociation).\ filter_by(security_group_id=security_group_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupIngressRule).\ filter_by(group_id=security_group_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def security_group_destroy_all(context, session=None): if not session: session = get_session() with session.begin(): session.query(models.SecurityGroup).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupIngressRule).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) ################### @require_context def security_group_rule_get(context, security_group_rule_id, session=None): if not session: session = get_session() if is_admin_context(context): result = session.query(models.SecurityGroupIngressRule).\ filter_by(deleted=can_read_deleted(context)).\ filter_by(id=security_group_rule_id).\ first() else: # TODO(vish): Join to group and check for project_id result = session.query(models.SecurityGroupIngressRule).\ filter_by(deleted=False).\ filter_by(id=security_group_rule_id).\ first() if not result: raise exception.SecurityGroupNotFoundForRule( rule_id=security_group_rule_id) return result @require_context def security_group_rule_get_by_security_group(context, security_group_id, session=None): if not session: session = get_session() if is_admin_context(context): result = session.query(models.SecurityGroupIngressRule).\ filter_by(deleted=can_read_deleted(context)).\ 
filter_by(parent_group_id=security_group_id).\ options(joinedload_all('grantee_group.instances')).\ all() else: result = session.query(models.SecurityGroupIngressRule).\ filter_by(deleted=False).\ filter_by(parent_group_id=security_group_id).\ options(joinedload_all('grantee_group.instances')).\ all() return result @require_context def security_group_rule_get_by_security_group_grantee(context, security_group_id, session=None): if not session: session = get_session() if is_admin_context(context): result = session.query(models.SecurityGroupIngressRule).\ filter_by(deleted=can_read_deleted(context)).\ filter_by(group_id=security_group_id).\ all() else: result = session.query(models.SecurityGroupIngressRule).\ filter_by(deleted=False).\ filter_by(group_id=security_group_id).\ all() return result @require_context def security_group_rule_create(context, values): security_group_rule_ref = models.SecurityGroupIngressRule() security_group_rule_ref.update(values) security_group_rule_ref.save() return security_group_rule_ref @require_context def security_group_rule_destroy(context, security_group_rule_id): session = get_session() with session.begin(): security_group_rule = security_group_rule_get(context, security_group_rule_id, session=session) security_group_rule.delete(session=session) ################### @require_admin_context def provider_fw_rule_create(context, rule): fw_rule_ref = models.ProviderFirewallRule() fw_rule_ref.update(rule) fw_rule_ref.save() return fw_rule_ref @require_admin_context def provider_fw_rule_get_all(context): session = get_session() return session.query(models.ProviderFirewallRule).\ filter_by(deleted=can_read_deleted(context)).\ all() @require_admin_context def provider_fw_rule_get_all_by_cidr(context, cidr): session = get_session() return session.query(models.ProviderFirewallRule).\ filter_by(deleted=can_read_deleted(context)).\ filter_by(cidr=cidr).\ all() @require_admin_context def provider_fw_rule_destroy(context, rule_id): session = get_session() with session.begin(): session.query(models.ProviderFirewallRule).\ filter_by(id=rule_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) ################### @require_admin_context def user_get(context, id, session=None): if not session: session = get_session() result = session.query(models.User).\ filter_by(id=id).\ filter_by(deleted=can_read_deleted(context)).\ first() if not result: raise exception.UserNotFound(user_id=id) return result @require_admin_context def user_get_by_access_key(context, access_key, session=None): if not session: session = get_session() result = session.query(models.User).\ filter_by(access_key=access_key).\ filter_by(deleted=can_read_deleted(context)).\ first() if not result: raise exception.AccessKeyNotFound(access_key=access_key) return result @require_admin_context def user_create(_context, values): user_ref = models.User() user_ref.update(values) user_ref.save() return user_ref @require_admin_context def user_delete(context, id): session = get_session() with session.begin(): session.query(models.UserProjectAssociation).\ filter_by(user_id=id).\ delete() session.query(models.UserRoleAssociation).\ filter_by(user_id=id).\ delete() session.query(models.UserProjectRoleAssociation).\ filter_by(user_id=id).\ delete() user_ref = user_get(context, id, session=session) session.delete(user_ref) def user_get_all(context): session = get_session() return session.query(models.User).\ filter_by(deleted=can_read_deleted(context)).\ all() def 
user_get_roles(context, user_id): session = get_session() with session.begin(): user_ref = user_get(context, user_id, session=session) return [role.role for role in user_ref['roles']] def user_get_roles_for_project(context, user_id, project_id): session = get_session() with session.begin(): res = session.query(models.UserProjectRoleAssociation).\ filter_by(user_id=user_id).\ filter_by(project_id=project_id).\ all() return [association.role for association in res] def user_remove_project_role(context, user_id, project_id, role): session = get_session() with session.begin(): session.query(models.UserProjectRoleAssociation).\ filter_by(user_id=user_id).\ filter_by(project_id=project_id).\ filter_by(role=role).\ delete() def user_remove_role(context, user_id, role): session = get_session() with session.begin(): res = session.query(models.UserRoleAssociation).\ filter_by(user_id=user_id).\ filter_by(role=role).\ all() for role in res: session.delete(role) def user_add_role(context, user_id, role): session = get_session() with session.begin(): user_ref = user_get(context, user_id, session=session) models.UserRoleAssociation(user=user_ref, role=role).\ save(session=session) def user_add_project_role(context, user_id, project_id, role): session = get_session() with session.begin(): user_ref = user_get(context, user_id, session=session) project_ref = project_get(context, project_id, session=session) models.UserProjectRoleAssociation(user_id=user_ref['id'], project_id=project_ref['id'], role=role).save(session=session) def user_update(context, user_id, values): session = get_session() with session.begin(): user_ref = user_get(context, user_id, session=session) user_ref.update(values) user_ref.save(session=session) ################### def project_create(_context, values): project_ref = models.Project() project_ref.update(values) project_ref.save() return project_ref def project_add_member(context, project_id, user_id): session = get_session() with session.begin(): project_ref = project_get(context, project_id, session=session) user_ref = user_get(context, user_id, session=session) project_ref.members += [user_ref] project_ref.save(session=session) def project_get(context, id, session=None): if not session: session = get_session() result = session.query(models.Project).\ filter_by(deleted=False).\ filter_by(id=id).\ options(joinedload_all('members')).\ first() if not result: raise exception.ProjectNotFound(project_id=id) return result def project_get_all(context): session = get_session() return session.query(models.Project).\ filter_by(deleted=can_read_deleted(context)).\ options(joinedload_all('members')).\ all() def project_get_by_user(context, user_id): session = get_session() user = session.query(models.User).\ filter_by(deleted=can_read_deleted(context)).\ filter_by(id=user_id).\ options(joinedload_all('projects')).\ first() if not user: raise exception.UserNotFound(user_id=user_id) return user.projects def project_remove_member(context, project_id, user_id): session = get_session() project = project_get(context, project_id, session=session) user = user_get(context, user_id, session=session) if user in project.members: project.members.remove(user) project.save(session=session) def project_update(context, project_id, values): session = get_session() with session.begin(): project_ref = project_get(context, project_id, session=session) project_ref.update(values) project_ref.save(session=session) def project_delete(context, id): session = get_session() with session.begin(): 
session.query(models.UserProjectAssociation).\ filter_by(project_id=id).\ delete() session.query(models.UserProjectRoleAssociation).\ filter_by(project_id=id).\ delete() project_ref = project_get(context, id, session=session) session.delete(project_ref) @require_context def project_get_networks(context, project_id, associate=True): # NOTE(tr3buchet): as before this function will associate # a project with a network if it doesn't have one and # associate is true session = get_session() result = session.query(models.Network).\ filter_by(project_id=project_id).\ filter_by(deleted=False).all() if not result: if not associate: return [] return [network_associate(context, project_id)] return result @require_context def project_get_networks_v6(context, project_id): return project_get_networks(context, project_id) ################### @require_admin_context def migration_create(context, values): migration = models.Migration() migration.update(values) migration.save() return migration @require_admin_context def migration_update(context, id, values): session = get_session() with session.begin(): migration = migration_get(context, id, session=session) migration.update(values) migration.save(session=session) return migration @require_admin_context def migration_get(context, id, session=None): if not session: session = get_session() result = session.query(models.Migration).\ filter_by(id=id).first() if not result: raise exception.MigrationNotFound(migration_id=id) return result @require_admin_context def migration_get_by_instance_and_status(context, instance_uuid, status): session = get_session() result = session.query(models.Migration).\ filter_by(instance_uuid=instance_uuid).\ filter_by(status=status).first() if not result: raise exception.MigrationNotFoundByStatus(instance_id=instance_uuid, status=status) return result ################## def console_pool_create(context, values): pool = models.ConsolePool() pool.update(values) pool.save() return pool def console_pool_get(context, pool_id): session = get_session() result = session.query(models.ConsolePool).\ filter_by(deleted=False).\ filter_by(id=pool_id).\ first() if not result: raise exception.ConsolePoolNotFound(pool_id=pool_id) return result def console_pool_get_by_host_type(context, compute_host, host, console_type): session = get_session() result = session.query(models.ConsolePool).\ filter_by(host=host).\ filter_by(console_type=console_type).\ filter_by(compute_host=compute_host).\ filter_by(deleted=False).\ options(joinedload('consoles')).\ first() if not result: raise exception.ConsolePoolNotFoundForHostType(host=host, console_type=console_type, compute_host=compute_host) return result def console_pool_get_all_by_host_type(context, host, console_type): session = get_session() return session.query(models.ConsolePool).\ filter_by(host=host).\ filter_by(console_type=console_type).\ filter_by(deleted=False).\ options(joinedload('consoles')).\ all() def console_create(context, values): console = models.Console() console.update(values) console.save() return console def console_delete(context, console_id): session = get_session() with session.begin(): # consoles are meant to be transient. 
(mdragon) session.query(models.Console).\ filter_by(id=console_id).\ delete() def console_get_by_pool_instance(context, pool_id, instance_id): session = get_session() result = session.query(models.Console).\ filter_by(pool_id=pool_id).\ filter_by(instance_id=instance_id).\ options(joinedload('pool')).\ first() if not result: raise exception.ConsoleNotFoundInPoolForInstance(pool_id=pool_id, instance_id=instance_id) return result def console_get_all_by_instance(context, instance_id): session = get_session() results = session.query(models.Console).\ filter_by(instance_id=instance_id).\ options(joinedload('pool')).\ all() return results def console_get(context, console_id, instance_id=None): session = get_session() query = session.query(models.Console).\ filter_by(id=console_id) if instance_id: query = query.filter_by(instance_id=instance_id) result = query.options(joinedload('pool')).first() if not result: if instance_id: raise exception.ConsoleNotFoundForInstance(console_id=console_id, instance_id=instance_id) else: raise exception.ConsoleNotFound(console_id=console_id) return result ################## @require_admin_context def instance_type_create(_context, values): """Create a new instance type. In order to pass in extra specs, the values dict should contain a 'extra_specs' key/value pair: {'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}} """ try: specs = values.get('extra_specs') specs_refs = [] if specs: for k, v in specs.iteritems(): specs_ref = models.InstanceTypeExtraSpecs() specs_ref['key'] = k specs_ref['value'] = v specs_refs.append(specs_ref) values['extra_specs'] = specs_refs instance_type_ref = models.InstanceTypes() instance_type_ref.update(values) instance_type_ref.save() except Exception, e: raise exception.DBError(e) return instance_type_ref def _dict_with_extra_specs(inst_type_query): """Takes an instance OR volume type query returned by sqlalchemy and returns it as a dictionary, converting the extra_specs entry from a list of dicts: 'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...] to a single dict: 'extra_specs' : {'k1': 'v1'} """ inst_type_dict = dict(inst_type_query) extra_specs = dict([(x['key'], x['value']) for x in \ inst_type_query['extra_specs']]) inst_type_dict['extra_specs'] = extra_specs return inst_type_dict @require_context def instance_type_get_all(context, inactive=False): """ Returns a dict describing all instance_types with name as key. 
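
    For illustration (made-up values), the returned dict is keyed by
    flavor name, with extra specs flattened by _dict_with_extra_specs:
        {'m1.tiny': {'id': 2, 'name': 'm1.tiny', 'memory_mb': 512, ...,
                     'extra_specs': {'cpu_arch': 'x86_64'}}}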
""" session = get_session() if inactive: inst_types = session.query(models.InstanceTypes).\ options(joinedload('extra_specs')).\ order_by("name").\ all() else: inst_types = session.query(models.InstanceTypes).\ options(joinedload('extra_specs')).\ filter_by(deleted=False).\ order_by("name").\ all() inst_dict = {} if inst_types: for i in inst_types: inst_dict[i['name']] = _dict_with_extra_specs(i) return inst_dict @require_context def instance_type_get(context, id): """Returns a dict describing specific instance_type""" session = get_session() inst_type = session.query(models.InstanceTypes).\ options(joinedload('extra_specs')).\ filter_by(id=id).\ first() if not inst_type: raise exception.InstanceTypeNotFound(instance_type=id) else: return _dict_with_extra_specs(inst_type) @require_context def instance_type_get_by_name(context, name): """Returns a dict describing specific instance_type""" session = get_session() inst_type = session.query(models.InstanceTypes).\ options(joinedload('extra_specs')).\ filter_by(name=name).\ first() if not inst_type: raise exception.InstanceTypeNotFoundByName(instance_type_name=name) else: return _dict_with_extra_specs(inst_type) @require_context def instance_type_get_by_flavor_id(context, id): """Returns a dict describing specific flavor_id""" try: flavor_id = int(id) except ValueError: raise exception.FlavorNotFound(flavor_id=id) session = get_session() inst_type = session.query(models.InstanceTypes).\ options(joinedload('extra_specs')).\ filter_by(flavorid=flavor_id).\ first() if not inst_type: raise exception.FlavorNotFound(flavor_id=flavor_id) else: return _dict_with_extra_specs(inst_type) @require_admin_context def instance_type_destroy(context, name): """ Marks specific instance_type as deleted""" session = get_session() instance_type_ref = session.query(models.InstanceTypes).\ filter_by(name=name) records = instance_type_ref.update(dict(deleted=True)) if records == 0: raise exception.InstanceTypeNotFoundByName(instance_type_name=name) else: return instance_type_ref @require_admin_context def instance_type_purge(context, name): """ Removes specific instance_type from DB Usually instance_type_destroy should be used """ session = get_session() instance_type_ref = session.query(models.InstanceTypes).\ filter_by(name=name) records = instance_type_ref.delete() if records == 0: raise exception.InstanceTypeNotFoundByName(instance_type_name=name) else: return instance_type_ref #################### @require_admin_context def zone_create(context, values): zone = models.Zone() zone.update(values) zone.save() return zone @require_admin_context def zone_update(context, zone_id, values): session = get_session() zone = session.query(models.Zone).filter_by(id=zone_id).first() if not zone: raise exception.ZoneNotFound(zone_id=zone_id) zone.update(values) zone.save(session=session) return zone @require_admin_context def zone_delete(context, zone_id): session = get_session() with session.begin(): session.query(models.Zone).\ filter_by(id=zone_id).\ delete() @require_admin_context def zone_get(context, zone_id): session = get_session() result = session.query(models.Zone).filter_by(id=zone_id).first() if not result: raise exception.ZoneNotFound(zone_id=zone_id) return result @require_admin_context def zone_get_all(context): session = get_session() return session.query(models.Zone).all() #################### @require_context @require_instance_exists def instance_metadata_get(context, instance_id): session = get_session() meta_results = session.query(models.InstanceMetadata).\ 
        filter_by(instance_id=instance_id).\
        filter_by(deleted=False).\
        all()

    meta_dict = {}
    for i in meta_results:
        meta_dict[i['key']] = i['value']

    return meta_dict


@require_context
@require_instance_exists
def instance_metadata_delete(context, instance_id, key):
    session = get_session()
    session.query(models.InstanceMetadata).\
        filter_by(instance_id=instance_id).\
        filter_by(key=key).\
        filter_by(deleted=False).\
        update({'deleted': True,
                'deleted_at': utils.utcnow(),
                'updated_at': literal_column('updated_at')})


@require_context
@require_instance_exists
def instance_metadata_delete_all(context, instance_id):
    session = get_session()
    session.query(models.InstanceMetadata).\
        filter_by(instance_id=instance_id).\
        filter_by(deleted=False).\
        update({'deleted': True,
                'deleted_at': utils.utcnow(),
                'updated_at': literal_column('updated_at')})


@require_context
@require_instance_exists
def instance_metadata_get_item(context, instance_id, key, session=None):
    if not session:
        session = get_session()

    meta_result = session.query(models.InstanceMetadata).\
        filter_by(instance_id=instance_id).\
        filter_by(key=key).\
        filter_by(deleted=False).\
        first()

    if not meta_result:
        raise exception.InstanceMetadataNotFound(metadata_key=key,
                                                 instance_id=instance_id)
    return meta_result


@require_context
@require_instance_exists
def instance_metadata_update(context, instance_id, metadata, delete):
    session = get_session()

    # Set existing metadata to deleted if delete argument is True
    if delete:
        original_metadata = instance_metadata_get(context, instance_id)
        for meta_key, meta_value in original_metadata.iteritems():
            if meta_key not in metadata:
                meta_ref = instance_metadata_get_item(context, instance_id,
                                                      meta_key, session)
                meta_ref.update({'deleted': True})
                meta_ref.save(session=session)

    meta_ref = None

    # Now update all existing items with new values, or create new
    # meta objects
    for meta_key, meta_value in metadata.iteritems():

        # update the value whether it exists or not
        item = {"value": meta_value}

        try:
            meta_ref = instance_metadata_get_item(context, instance_id,
                                                  meta_key, session)
        except exception.InstanceMetadataNotFound, e:
            meta_ref = models.InstanceMetadata()
            item.update({"key": meta_key, "instance_id": instance_id})

        meta_ref.update(item)
        meta_ref.save(session=session)

    return metadata


####################


@require_admin_context
def agent_build_create(context, values):
    agent_build_ref = models.AgentBuild()
    agent_build_ref.update(values)
    agent_build_ref.save()
    return agent_build_ref


@require_admin_context
def agent_build_get_by_triple(context, hypervisor, os, architecture,
                              session=None):
    if not session:
        session = get_session()
    return session.query(models.AgentBuild).\
        filter_by(hypervisor=hypervisor).\
        filter_by(os=os).\
        filter_by(architecture=architecture).\
        filter_by(deleted=False).\
        first()


@require_admin_context
def agent_build_get_all(context):
    session = get_session()
    return session.query(models.AgentBuild).\
        filter_by(deleted=False).\
        all()


@require_admin_context
def agent_build_destroy(context, agent_build_id):
    session = get_session()
    with session.begin():
        session.query(models.AgentBuild).\
            filter_by(id=agent_build_id).\
            update({'deleted': True,
                    'deleted_at': utils.utcnow(),
                    'updated_at': literal_column('updated_at')})


@require_admin_context
def agent_build_update(context, agent_build_id, values):
    session = get_session()
    with session.begin():
        agent_build_ref = session.query(models.AgentBuild).\
            filter_by(id=agent_build_id).\
            first()
        agent_build_ref.update(values)
        agent_build_ref.save(session=session)


####################


@require_context
def instance_type_extra_specs_get(context, instance_type_id):
    session = get_session()
    spec_results = session.query(models.InstanceTypeExtraSpecs).\
        filter_by(instance_type_id=instance_type_id).\
        filter_by(deleted=False).\
        all()
    spec_dict = {}
    for i in spec_results:
        spec_dict[i['key']] = i['value']
    return spec_dict


@require_context
def instance_type_extra_specs_delete(context, instance_type_id, key):
    session = get_session()
    session.query(models.InstanceTypeExtraSpecs).\
        filter_by(instance_type_id=instance_type_id).\
        filter_by(key=key).\
        filter_by(deleted=False).\
        update({'deleted': True,
                'deleted_at': utils.utcnow(),
                'updated_at': literal_column('updated_at')})


@require_context
def instance_type_extra_specs_get_item(context, instance_type_id, key,
                                       session=None):
    if not session:
        session = get_session()
    spec_result = session.query(models.InstanceTypeExtraSpecs).\
        filter_by(instance_type_id=instance_type_id).\
        filter_by(key=key).\
        filter_by(deleted=False).\
        first()
    if not spec_result:
        raise exception.\
            InstanceTypeExtraSpecsNotFound(extra_specs_key=key,
                                           instance_type_id=instance_type_id)
    return spec_result


@require_context
def instance_type_extra_specs_update_or_create(context, instance_type_id,
                                               specs):
    session = get_session()
    spec_ref = None
    for key, value in specs.iteritems():
        try:
            spec_ref = instance_type_extra_specs_get_item(
                context, instance_type_id, key, session)
        except exception.InstanceTypeExtraSpecsNotFound, e:
            spec_ref = models.InstanceTypeExtraSpecs()
        spec_ref.update({"key": key, "value": value,
                         "instance_type_id": instance_type_id,
                         "deleted": 0})
        spec_ref.save(session=session)
    return specs


##################


@require_admin_context
def volume_type_create(_context, values):
    """Create a new volume type.

    In order to pass in extra specs, the values dict should contain an
    'extra_specs' key/value pair:
    {'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}}
    """
    try:
        values['extra_specs'] = _metadata_refs(values.get('extra_specs'),
                                               models.VolumeTypeExtraSpecs)
        volume_type_ref = models.VolumeTypes()
        volume_type_ref.update(values)
        volume_type_ref.save()
    except Exception, e:
        raise exception.DBError(e)
    return volume_type_ref


@require_context
def volume_type_get_all(context, inactive=False, filters={}):
    """
    Returns a dict describing all volume_types with name as key.
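
    For illustration (made-up values), the returned dict is keyed by
    volume type name:
        {'fast': {'id': 1, 'name': 'fast', ...,
                  'extra_specs': {'qos': 'high'}}}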
""" session = get_session() if inactive: vol_types = session.query(models.VolumeTypes).\ options(joinedload('extra_specs')).\ order_by("name").\ all() else: vol_types = session.query(models.VolumeTypes).\ options(joinedload('extra_specs')).\ filter_by(deleted=False).\ order_by("name").\ all() vol_dict = {} if vol_types: for i in vol_types: vol_dict[i['name']] = _dict_with_extra_specs(i) return vol_dict @require_context def volume_type_get(context, id): """Returns a dict describing specific volume_type""" session = get_session() vol_type = session.query(models.VolumeTypes).\ options(joinedload('extra_specs')).\ filter_by(id=id).\ first() if not vol_type: raise exception.VolumeTypeNotFound(volume_type=id) else: return _dict_with_extra_specs(vol_type) @require_context def volume_type_get_by_name(context, name): """Returns a dict describing specific volume_type""" session = get_session() vol_type = session.query(models.VolumeTypes).\ options(joinedload('extra_specs')).\ filter_by(name=name).\ first() if not vol_type: raise exception.VolumeTypeNotFoundByName(volume_type_name=name) else: return _dict_with_extra_specs(vol_type) @require_admin_context def volume_type_destroy(context, name): """ Marks specific volume_type as deleted""" session = get_session() volume_type_ref = session.query(models.VolumeTypes).\ filter_by(name=name) records = volume_type_ref.update(dict(deleted=True)) if records == 0: raise exception.VolumeTypeNotFoundByName(volume_type_name=name) else: return volume_type_ref @require_admin_context def volume_type_purge(context, name): """ Removes specific volume_type from DB Usually volume_type_destroy should be used """ session = get_session() volume_type_ref = session.query(models.VolumeTypes).\ filter_by(name=name) records = volume_type_ref.delete() if records == 0: raise exception.VolumeTypeNotFoundByName(volume_type_name=name) else: return volume_type_ref #################### @require_context def volume_type_extra_specs_get(context, volume_type_id): session = get_session() spec_results = session.query(models.VolumeTypeExtraSpecs).\ filter_by(volume_type_id=volume_type_id).\ filter_by(deleted=False).\ all() spec_dict = {} for i in spec_results: spec_dict[i['key']] = i['value'] return spec_dict @require_context def volume_type_extra_specs_delete(context, volume_type_id, key): session = get_session() session.query(models.VolumeTypeExtraSpecs).\ filter_by(volume_type_id=volume_type_id).\ filter_by(key=key).\ filter_by(deleted=False).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def volume_type_extra_specs_get_item(context, volume_type_id, key, session=None): if not session: session = get_session() spec_result = session.query(models.VolumeTypeExtraSpecs).\ filter_by(volume_type_id=volume_type_id).\ filter_by(key=key).\ filter_by(deleted=False).\ first() if not spec_result: raise exception.\ VolumeTypeExtraSpecsNotFound(extra_specs_key=key, volume_type_id=volume_type_id) return spec_result @require_context def volume_type_extra_specs_update_or_create(context, volume_type_id, specs): session = get_session() spec_ref = None for key, value in specs.iteritems(): try: spec_ref = volume_type_extra_specs_get_item( context, volume_type_id, key, session) except exception.VolumeTypeExtraSpecsNotFound, e: spec_ref = models.VolumeTypeExtraSpecs() spec_ref.update({"key": key, "value": value, "volume_type_id": volume_type_id, "deleted": 0}) spec_ref.save(session=session) return specs #################### 
@require_admin_context def vsa_create(context, values): """ Creates Virtual Storage Array record. """ try: vsa_ref = models.VirtualStorageArray() vsa_ref.update(values) vsa_ref.save() except Exception, e: raise exception.DBError(e) return vsa_ref @require_admin_context def vsa_update(context, vsa_id, values): """ Updates Virtual Storage Array record. """ session = get_session() with session.begin(): vsa_ref = vsa_get(context, vsa_id, session=session) vsa_ref.update(values) vsa_ref.save(session=session) return vsa_ref @require_admin_context def vsa_destroy(context, vsa_id): """ Deletes Virtual Storage Array record. """ session = get_session() with session.begin(): session.query(models.VirtualStorageArray).\ filter_by(id=vsa_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def vsa_get(context, vsa_id, session=None): """ Get Virtual Storage Array record by ID. """ if not session: session = get_session() result = None if is_admin_context(context): result = session.query(models.VirtualStorageArray).\ options(joinedload('vsa_instance_type')).\ filter_by(id=vsa_id).\ filter_by(deleted=can_read_deleted(context)).\ first() elif is_user_context(context): result = session.query(models.VirtualStorageArray).\ options(joinedload('vsa_instance_type')).\ filter_by(project_id=context.project_id).\ filter_by(id=vsa_id).\ filter_by(deleted=False).\ first() if not result: raise exception.VirtualStorageArrayNotFound(id=vsa_id) return result @require_admin_context def vsa_get_all(context): """ Get all Virtual Storage Array records. """ session = get_session() return session.query(models.VirtualStorageArray).\ options(joinedload('vsa_instance_type')).\ filter_by(deleted=can_read_deleted(context)).\ all() @require_context def vsa_get_all_by_project(context, project_id): """ Get all Virtual Storage Array records by project ID. """ authorize_project_context(context, project_id) session = get_session() return session.query(models.VirtualStorageArray).\ options(joinedload('vsa_instance_type')).\ filter_by(project_id=project_id).\ filter_by(deleted=can_read_deleted(context)).\ all() ####################
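# NOTE: minimal usage sketch for the Virtual Storage Array helpers above;
# the values are invented for illustration and `ctxt` stands for an admin
# request context:
#
#     vsa = vsa_create(ctxt, {'project_id': 'proj1',
#                             'display_name': 'vsa-1'})
#     vsa_update(ctxt, vsa['id'], {'display_name': 'vsa-renamed'})
#     vsa_destroy(ctxt, vsa['id'])    # soft-delete: sets deleted/deleted_at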
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of SQLAlchemy backend.""" import datetime import functools import re import warnings from nova import block_device from nova import db from nova import exception from nova import flags from nova import utils from nova import log as logging from nova.compute import aggregate_states from nova.compute import vm_states from nova.db.sqlalchemy import models from nova.db.sqlalchemy.session import get_session from sqlalchemy import and_ from sqlalchemy import or_ from sqlalchemy.exc import IntegrityError from sqlalchemy.orm import joinedload from sqlalchemy.orm import joinedload_all from sqlalchemy.sql import func from sqlalchemy.sql.expression import asc from sqlalchemy.sql.expression import desc from sqlalchemy.sql.expression import literal_column FLAGS = flags.FLAGS flags.DECLARE('reserved_host_disk_mb', 'nova.scheduler.host_manager') flags.DECLARE('reserved_host_memory_mb', 'nova.scheduler.host_manager') LOG = logging.getLogger(__name__) def is_admin_context(context): """Indicates if the request context is an administrator.""" if not context: warnings.warn(_('Use of empty request context is deprecated'), DeprecationWarning) raise Exception('die') return context.is_admin def is_user_context(context): """Indicates if the request context is a normal user.""" if not context: return False if context.is_admin: return False if not context.user_id or not context.project_id: return False return True def authorize_project_context(context, project_id): """Ensures a request has permission to access the given project.""" if is_user_context(context): if not context.project_id: raise exception.NotAuthorized() elif context.project_id != project_id: raise exception.NotAuthorized() def authorize_user_context(context, user_id): """Ensures a request has permission to access the given user.""" if is_user_context(context): if not context.user_id: raise exception.NotAuthorized() elif context.user_id != user_id: raise exception.NotAuthorized() def authorize_quota_class_context(context, class_name): """Ensures a request has permission to access the given quota class.""" if is_user_context(context): if not context.quota_class: raise exception.NotAuthorized() elif context.quota_class != class_name: raise exception.NotAuthorized() def require_admin_context(f): """Decorator to require admin request context. The first argument to the wrapped function must be the context. """ def wrapper(*args, **kwargs): if not is_admin_context(args[0]): raise exception.AdminRequired() return f(*args, **kwargs) return wrapper def require_context(f): """Decorator to require *any* user or admin context. 
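
    For illustration (hypothetical wrapped function), the decorated
    callable must take the request context as its first argument:

        @require_context
        def instance_get(context, instance_id):
            ...
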
This does no authorization for user or project access matching, see :py:func:`authorize_project_context` and :py:func:`authorize_user_context`. The first argument to the wrapped function must be the context. """ def wrapper(*args, **kwargs): if not is_admin_context(args[0]) and not is_user_context(args[0]): raise exception.NotAuthorized() return f(*args, **kwargs) return wrapper def require_instance_exists(f): """Decorator to require the specified instance to exist. Requires the wrapped function to use context and instance_id as their first two arguments. """ def wrapper(context, instance_id, *args, **kwargs): db.instance_get(context, instance_id) return f(context, instance_id, *args, **kwargs) wrapper.__name__ = f.__name__ return wrapper def require_volume_exists(f): """Decorator to require the specified volume to exist. Requires the wrapped function to use context and volume_id as their first two arguments. """ def wrapper(context, volume_id, *args, **kwargs): db.volume_get(context, volume_id) return f(context, volume_id, *args, **kwargs) wrapper.__name__ = f.__name__ return wrapper def require_aggregate_exists(f): """Decorator to require the specified aggregate to exist. Requires the wrapped function to use context and aggregate_id as their first two arguments. """ @functools.wraps(f) def wrapper(context, aggregate_id, *args, **kwargs): db.aggregate_get(context, aggregate_id) return f(context, aggregate_id, *args, **kwargs) return wrapper def model_query(context, *args, **kwargs): """Query helper that accounts for context's `read_deleted` field. :param context: context to query under :param session: if present, the session to use :param read_deleted: if present, overrides context's read_deleted field. :param project_only: if present and context is user-type, then restrict query to match the context's project_id. """ session = kwargs.get('session') or get_session() read_deleted = kwargs.get('read_deleted') or context.read_deleted project_only = kwargs.get('project_only') query = session.query(*args) if read_deleted == 'no': query = query.filter_by(deleted=False) elif read_deleted == 'yes': pass # omit the filter to include deleted and active elif read_deleted == 'only': query = query.filter_by(deleted=True) else: raise Exception( _("Unrecognized read_deleted value '%s'") % read_deleted) if project_only and is_user_context(context): query = query.filter_by(project_id=context.project_id) return query def exact_filter(query, model, filters, legal_keys): """Applies exact match filtering to a query. Returns the updated query. Modifies filters argument to remove filters consumed. :param query: query to apply filters to :param model: model object the query applies to, for IN-style filtering :param filters: dictionary of filters; values that are lists, tuples, sets, or frozensets cause an 'IN' test to be performed, while exact matching ('==' operator) is used for other values :param legal_keys: list of keys to apply exact filtering to """ filter_dict = {} # Walk through all the keys for key in legal_keys: # Skip ones we're not filtering on if key not in filters: continue # OK, filtering on this key; what value do we search for? 
value = filters.pop(key) if isinstance(value, (list, tuple, set, frozenset)): # Looking for values in a list; apply to query directly column_attr = getattr(model, key) query = query.filter(column_attr.in_(value)) else: # OK, simple exact match; save for later filter_dict[key] = value # Apply simple exact matches if filter_dict: query = query.filter_by(**filter_dict) return query ################### @require_admin_context def service_destroy(context, service_id): session = get_session() with session.begin(): service_ref = service_get(context, service_id, session=session) service_ref.delete(session=session) if service_ref.topic == 'compute' and service_ref.compute_node: for c in service_ref.compute_node: c.delete(session=session) @require_admin_context def service_get(context, service_id, session=None): result = model_query(context, models.Service, session=session).\ options(joinedload('compute_node')).\ filter_by(id=service_id).\ first() if not result: raise exception.ServiceNotFound(service_id=service_id) return result @require_admin_context def service_get_all(context, disabled=None): query = model_query(context, models.Service) if disabled is not None: query = query.filter_by(disabled=disabled) return query.all() @require_admin_context def service_get_all_by_topic(context, topic): return model_query(context, models.Service, read_deleted="no").\ filter_by(disabled=False).\ filter_by(topic=topic).\ all() @require_admin_context def service_get_by_host_and_topic(context, host, topic): return model_query(context, models.Service, read_deleted="no").\ filter_by(disabled=False).\ filter_by(host=host).\ filter_by(topic=topic).\ first() @require_admin_context def service_get_all_by_host(context, host): return model_query(context, models.Service, read_deleted="no").\ filter_by(host=host).\ all() @require_admin_context def service_get_all_compute_by_host(context, host): result = model_query(context, models.Service, read_deleted="no").\ options(joinedload('compute_node')).\ filter_by(host=host).\ filter_by(topic="compute").\ all() if not result: raise exception.ComputeHostNotFound(host=host) return result @require_admin_context def _service_get_all_topic_subquery(context, session, topic, subq, label): sort_value = getattr(subq.c, label) return model_query(context, models.Service, func.coalesce(sort_value, 0), session=session, read_deleted="no").\ filter_by(topic=topic).\ filter_by(disabled=False).\ outerjoin((subq, models.Service.host == subq.c.host)).\ order_by(sort_value).\ all() @require_admin_context def service_get_all_compute_sorted(context): session = get_session() with session.begin(): # NOTE(vish): The intended query is below # SELECT services.*, COALESCE(inst_cores.instance_cores, # 0) # FROM services LEFT OUTER JOIN # (SELECT host, SUM(instances.vcpus) AS instance_cores # FROM instances GROUP BY host) AS inst_cores # ON services.host = inst_cores.host topic = 'compute' label = 'instance_cores' subq = model_query(context, models.Instance.host, func.sum(models.Instance.vcpus).label(label), session=session, read_deleted="no").\ group_by(models.Instance.host).\ subquery() return _service_get_all_topic_subquery(context, session, topic, subq, label) @require_admin_context def service_get_all_volume_sorted(context): session = get_session() with session.begin(): topic = 'volume' label = 'volume_gigabytes' subq = model_query(context, models.Volume.host, func.sum(models.Volume.size).label(label), session=session, read_deleted="no").\ group_by(models.Volume.host).\ subquery() return 
_service_get_all_topic_subquery(context,
                                               session,
                                               topic,
                                               subq,
                                               label)


@require_admin_context
def service_get_by_args(context, host, binary):
    result = model_query(context, models.Service).\
                     filter_by(host=host).\
                     filter_by(binary=binary).\
                     first()

    if not result:
        raise exception.HostBinaryNotFound(host=host, binary=binary)

    return result


@require_admin_context
def service_create(context, values):
    service_ref = models.Service()
    service_ref.update(values)
    if not FLAGS.enable_new_services:
        service_ref.disabled = True
    service_ref.save()
    return service_ref


@require_admin_context
def service_update(context, service_id, values):
    session = get_session()
    with session.begin():
        service_ref = service_get(context, service_id, session=session)
        service_ref.update(values)
        service_ref.save(session=session)


###################


@require_admin_context
def compute_node_get(context, compute_id, session=None):
    result = model_query(context, models.ComputeNode, session=session).\
                     filter_by(id=compute_id).\
                     first()

    if not result:
        raise exception.ComputeHostNotFound(host=compute_id)

    return result


@require_admin_context
def compute_node_get_all(context, session=None):
    return model_query(context, models.ComputeNode, session=session).\
                    options(joinedload('service')).\
                    all()


def _get_host_utilization(context, host, ram_mb, disk_gb):
    """Compute the current utilization of a given host."""
    instances = instance_get_all_by_host(context, host)
    vms = len(instances)
    free_ram_mb = ram_mb - FLAGS.reserved_host_memory_mb
    # NOTE: reserved_host_disk_mb is expressed in megabytes, so convert
    # it to gigabytes before subtracting it from free_disk_gb.
    free_disk_gb = disk_gb - (FLAGS.reserved_host_disk_mb / 1024)

    work = 0
    for instance in instances:
        free_ram_mb -= instance.memory_mb
        free_disk_gb -= instance.root_gb
        free_disk_gb -= instance.ephemeral_gb
        if instance.vm_state in [vm_states.BUILDING, vm_states.REBUILDING,
                                 vm_states.MIGRATING, vm_states.RESIZING]:
            work += 1
    return dict(free_ram_mb=free_ram_mb,
                free_disk_gb=free_disk_gb,
                current_workload=work,
                running_vms=vms)


def _adjust_compute_node_values_for_utilization(context, values, session):
    service_ref = service_get(context, values['service_id'], session=session)
    host = service_ref['host']
    ram_mb = values['memory_mb']
    disk_gb = values['local_gb']
    values.update(_get_host_utilization(context, host, ram_mb, disk_gb))


@require_admin_context
def compute_node_create(context, values, session=None):
    """Creates a new ComputeNode and populates the capacity fields
    with the most recent data."""
    if not session:
        session = get_session()

    _adjust_compute_node_values_for_utilization(context, values, session)
    with session.begin(subtransactions=True):
        compute_node_ref = models.ComputeNode()
        session.add(compute_node_ref)
        compute_node_ref.update(values)
    return compute_node_ref


@require_admin_context
def compute_node_update(context, compute_id, values, auto_adjust):
    """Updates an existing ComputeNode entry; when auto_adjust is set,
    the capacity fields are first refreshed with the most recent data."""
    session = get_session()
    if auto_adjust:
        _adjust_compute_node_values_for_utilization(context, values, session)
    with session.begin(subtransactions=True):
        compute_ref = compute_node_get(context, compute_id, session=session)
        compute_ref.update(values)
        compute_ref.save(session=session)


def compute_node_get_by_host(context, host):
    """Get all capacity entries for the given host."""
    session = get_session()
    with session.begin():
        node = session.query(models.ComputeNode).\
                             options(joinedload('service')).\
                             filter(models.Service.host == host).\
                             filter_by(deleted=False)
        return node.first()


def compute_node_utilization_update(context, host, free_ram_mb_delta=0,
                                    free_disk_gb_delta=0, work_delta=0,
                                    vm_delta=0):
    """Update a specific ComputeNode entry by a series of deltas.
    Do this as a single atomic action and lock the row for the
    duration of the operation. Requires that the ComputeNode record
    exists."""
    session = get_session()
    compute_node = None
    with session.begin(subtransactions=True):
        compute_node = session.query(models.ComputeNode).\
                              options(joinedload('service')).\
                              filter(models.Service.host == host).\
                              filter_by(deleted=False).\
                              with_lockmode('update').\
                              first()
        if compute_node is None:
            raise exception.NotFound(_("No ComputeNode for %(host)s") %
                                     locals())

        # This table thingy is how we get atomic UPDATE x = x + 1
        # semantics.
        table = models.ComputeNode.__table__
        if free_ram_mb_delta != 0:
            compute_node.free_ram_mb = table.c.free_ram_mb + free_ram_mb_delta
        if free_disk_gb_delta != 0:
            compute_node.free_disk_gb = (table.c.free_disk_gb +
                                         free_disk_gb_delta)
        if work_delta != 0:
            compute_node.current_workload = (table.c.current_workload +
                                             work_delta)
        if vm_delta != 0:
            compute_node.running_vms = table.c.running_vms + vm_delta
    return compute_node


def compute_node_utilization_set(context, host, free_ram_mb=None,
                                 free_disk_gb=None, work=None, vms=None):
    """Like compute_node_utilization_update(), but sets the metrics on a
    specific host entry absolutely instead of applying deltas.
    """
    session = get_session()
    compute_node = None
    with session.begin(subtransactions=True):
        compute_node = session.query(models.ComputeNode).\
                              options(joinedload('service')).\
                              filter(models.Service.host == host).\
                              filter_by(deleted=False).\
                              with_lockmode('update').\
                              first()
        if compute_node is None:
            raise exception.NotFound(_("No ComputeNode for %(host)s") %
                                     locals())

        if free_ram_mb is not None:
            compute_node.free_ram_mb = free_ram_mb
        if free_disk_gb is not None:
            compute_node.free_disk_gb = free_disk_gb
        if work is not None:
            compute_node.current_workload = work
        if vms is not None:
            compute_node.running_vms = vms

    return compute_node


###################


@require_admin_context
def certificate_get(context, certificate_id, session=None):
    result = model_query(context, models.Certificate, session=session).\
                     filter_by(id=certificate_id).\
                     first()

    if not result:
        raise exception.CertificateNotFound(certificate_id=certificate_id)

    return result


@require_admin_context
def certificate_create(context, values):
    certificate_ref = models.Certificate()
    for (key, value) in values.iteritems():
        certificate_ref[key] = value
    certificate_ref.save()
    return certificate_ref


@require_admin_context
def certificate_get_all_by_project(context, project_id):
    return model_query(context, models.Certificate, read_deleted="no").\
                   filter_by(project_id=project_id).\
                   all()


@require_admin_context
def certificate_get_all_by_user(context, user_id):
    return model_query(context, models.Certificate, read_deleted="no").\
                   filter_by(user_id=user_id).\
                   all()


@require_admin_context
def certificate_get_all_by_user_and_project(context, user_id, project_id):
    return model_query(context, models.Certificate, read_deleted="no").\
                   filter_by(user_id=user_id).\
                   filter_by(project_id=project_id).\
                   all()


###################


@require_context
def floating_ip_get(context, id):
    result = model_query(context, models.FloatingIp, project_only=True).\
                 filter_by(id=id).\
                 first()

    if not result:
        raise exception.FloatingIpNotFound(id=id)

    return result


@require_context
def floating_ip_get_pools(context):
    session = get_session()
    pools = []
    for result in session.query(models.FloatingIp.pool).distinct():
        pools.append({'name': result[0]})
    return pools


@require_context
def floating_ip_allocate_address(context,
project_id, pool): authorize_project_context(context, project_id) session = get_session() with session.begin(): floating_ip_ref = model_query(context, models.FloatingIp, session=session, read_deleted="no").\ filter_by(fixed_ip_id=None).\ filter_by(project_id=None).\ filter_by(pool=pool).\ with_lockmode('update').\ first() # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues if not floating_ip_ref: raise exception.NoMoreFloatingIps() floating_ip_ref['project_id'] = project_id session.add(floating_ip_ref) return floating_ip_ref['address'] @require_context def floating_ip_create(context, values): floating_ip_ref = models.FloatingIp() floating_ip_ref.update(values) floating_ip_ref.save() return floating_ip_ref['address'] @require_context def floating_ip_count_by_project(context, project_id): authorize_project_context(context, project_id) # TODO(tr3buchet): why leave auto_assigned floating IPs out? return model_query(context, models.FloatingIp, read_deleted="no").\ filter_by(project_id=project_id).\ filter_by(auto_assigned=False).\ count() @require_context def floating_ip_fixed_ip_associate(context, floating_address, fixed_address, host): session = get_session() with session.begin(): floating_ip_ref = floating_ip_get_by_address(context, floating_address, session=session) fixed_ip_ref = fixed_ip_get_by_address(context, fixed_address, session=session) floating_ip_ref.fixed_ip_id = fixed_ip_ref["id"] floating_ip_ref.host = host floating_ip_ref.save(session=session) @require_context def floating_ip_deallocate(context, address): session = get_session() with session.begin(): floating_ip_ref = floating_ip_get_by_address(context, address, session=session) floating_ip_ref['project_id'] = None floating_ip_ref['host'] = None floating_ip_ref['auto_assigned'] = False floating_ip_ref.save(session=session) @require_context def floating_ip_destroy(context, address): session = get_session() with session.begin(): floating_ip_ref = floating_ip_get_by_address(context, address, session=session) floating_ip_ref.delete(session=session) @require_context def floating_ip_disassociate(context, address): session = get_session() with session.begin(): floating_ip_ref = floating_ip_get_by_address(context, address, session=session) fixed_ip_ref = fixed_ip_get(context, floating_ip_ref['fixed_ip_id']) if fixed_ip_ref: fixed_ip_address = fixed_ip_ref['address'] else: fixed_ip_address = None floating_ip_ref.fixed_ip_id = None floating_ip_ref.host = None floating_ip_ref.save(session=session) return fixed_ip_address @require_context def floating_ip_set_auto_assigned(context, address): session = get_session() with session.begin(): floating_ip_ref = floating_ip_get_by_address(context, address, session=session) floating_ip_ref.auto_assigned = True floating_ip_ref.save(session=session) def _floating_ip_get_all(context): return model_query(context, models.FloatingIp, read_deleted="no") @require_admin_context def floating_ip_get_all(context): floating_ip_refs = _floating_ip_get_all(context).all() if not floating_ip_refs: raise exception.NoFloatingIpsDefined() return floating_ip_refs @require_admin_context def floating_ip_get_all_by_host(context, host): floating_ip_refs = _floating_ip_get_all(context).\ filter_by(host=host).\ all() if not floating_ip_refs: raise exception.FloatingIpNotFoundForHost(host=host) return floating_ip_refs @require_context def floating_ip_get_all_by_project(context, project_id): authorize_project_context(context, project_id) # TODO(tr3buchet): why do we not want 
auto_assigned floating IPs here? return _floating_ip_get_all(context).\ filter_by(project_id=project_id).\ filter_by(auto_assigned=False).\ all() @require_context def floating_ip_get_by_address(context, address, session=None): result = model_query(context, models.FloatingIp, session=session).\ filter_by(address=address).\ first() if not result: raise exception.FloatingIpNotFoundForAddress(address=address) # If the floating IP has a project ID set, check to make sure # the non-admin user has access. if result.project_id and is_user_context(context): authorize_project_context(context, result.project_id) return result @require_context def floating_ip_get_by_fixed_address(context, fixed_address, session=None): if not session: session = get_session() fixed_ip = fixed_ip_get_by_address(context, fixed_address, session) fixed_ip_id = fixed_ip['id'] return model_query(context, models.FloatingIp, session=session).\ filter_by(fixed_ip_id=fixed_ip_id).\ all() # NOTE(tr3buchet) please don't invent an exception here, empty list is fine @require_context def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id, session=None): if not session: session = get_session() return model_query(context, models.FloatingIp, session=session).\ filter_by(fixed_ip_id=fixed_ip_id).\ all() @require_context def floating_ip_update(context, address, values): session = get_session() with session.begin(): floating_ip_ref = floating_ip_get_by_address(context, address, session) for (key, value) in values.iteritems(): floating_ip_ref[key] = value floating_ip_ref.save(session=session) @require_context def _dnsdomain_get(context, session, fqdomain): return model_query(context, models.DNSDomain, session=session, read_deleted="no").\ filter_by(domain=fqdomain).\ with_lockmode('update').\ first() @require_context def dnsdomain_get(context, fqdomain): session = get_session() with session.begin(): return _dnsdomain_get(context, session, fqdomain) @require_admin_context def _dnsdomain_get_or_create(context, session, fqdomain): domain_ref = _dnsdomain_get(context, session, fqdomain) if not domain_ref: dns_ref = models.DNSDomain() dns_ref.update({'domain': fqdomain, 'availability_zone': None, 'project_id': None}) return dns_ref return domain_ref @require_admin_context def dnsdomain_register_for_zone(context, fqdomain, zone): session = get_session() with session.begin(): domain_ref = _dnsdomain_get_or_create(context, session, fqdomain) domain_ref.scope = 'private' domain_ref.availability_zone = zone domain_ref.save(session=session) @require_admin_context def dnsdomain_register_for_project(context, fqdomain, project): session = get_session() with session.begin(): domain_ref = _dnsdomain_get_or_create(context, session, fqdomain) domain_ref.scope = 'public' domain_ref.project_id = project domain_ref.save(session=session) @require_admin_context def dnsdomain_unregister(context, fqdomain): session = get_session() with session.begin(): session.query(models.DNSDomain).\ filter_by(domain=fqdomain).\ delete() @require_context def dnsdomain_list(context): session = get_session() records = model_query(context, models.DNSDomain, session=session, read_deleted="no").\ with_lockmode('update').all() domains = [] for record in records: domains.append(record.domain) return domains ################### @require_admin_context def fixed_ip_associate(context, address, instance_id, network_id=None, reserved=False): """Keyword arguments: reserved -- should be a boolean value(True or False), exact value will be used to filter on the fixed ip address """ session = 
get_session()
    with session.begin():
        network_or_none = or_(models.FixedIp.network_id == network_id,
                              models.FixedIp.network_id == None)
        fixed_ip_ref = model_query(context, models.FixedIp, session=session,
                                   read_deleted="no").\
                               filter(network_or_none).\
                               filter_by(reserved=reserved).\
                               filter_by(address=address).\
                               with_lockmode('update').\
                               first()

        # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
        #             then this has concurrency issues
        if fixed_ip_ref is None:
            raise exception.FixedIpNotFoundForNetwork(address=address,
                                                      network_id=network_id)
        if fixed_ip_ref.instance_id:
            raise exception.FixedIpAlreadyInUse(address=address)

        if not fixed_ip_ref.network_id:
            fixed_ip_ref.network_id = network_id

        fixed_ip_ref.instance_id = instance_id
        session.add(fixed_ip_ref)
    return fixed_ip_ref['address']


@require_admin_context
def fixed_ip_associate_pool(context, network_id, instance_id=None, host=None):
    session = get_session()
    with session.begin():
        network_or_none = or_(models.FixedIp.network_id == network_id,
                              models.FixedIp.network_id == None)
        fixed_ip_ref = model_query(context, models.FixedIp, session=session,
                                   read_deleted="no").\
                               filter(network_or_none).\
                               filter_by(reserved=False).\
                               filter_by(instance_id=None).\
                               filter_by(host=None).\
                               with_lockmode('update').\
                               first()

        # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
        #             then this has concurrency issues
        if not fixed_ip_ref:
            raise exception.NoMoreFixedIps()

        if fixed_ip_ref['network_id'] is None:
            fixed_ip_ref['network_id'] = network_id

        if instance_id:
            fixed_ip_ref['instance_id'] = instance_id

        if host:
            fixed_ip_ref['host'] = host
        session.add(fixed_ip_ref)
    return fixed_ip_ref['address']


@require_context
def fixed_ip_create(context, values):
    fixed_ip_ref = models.FixedIp()
    fixed_ip_ref.update(values)
    fixed_ip_ref.save()
    return fixed_ip_ref['address']


@require_context
def fixed_ip_bulk_create(context, ips):
    session = get_session()
    with session.begin():
        for ip in ips:
            model = models.FixedIp()
            model.update(ip)
            session.add(model)


@require_context
def fixed_ip_disassociate(context, address):
    session = get_session()
    with session.begin():
        fixed_ip_ref = fixed_ip_get_by_address(context,
                                               address,
                                               session=session)
        fixed_ip_ref['instance_id'] = None
        fixed_ip_ref.save(session=session)


@require_admin_context
def fixed_ip_disassociate_all_by_timeout(context, host, time):
    session = get_session()
    # NOTE(vish): only update fixed ips that "belong" to this
    #             host; i.e. the network host or the instance
    #             host matches. Two queries necessary because
    #             join with update doesn't work.
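    # Roughly, the two queries below amount to (illustrative sketch, not
    # the generated SQL):
    #   SELECT fixed_ips.id FROM fixed_ips
    #     JOIN networks ON networks.id = fixed_ips.network_id
    #     JOIN instances ON instances.id = fixed_ips.instance_id
    #    WHERE NOT deleted AND NOT allocated AND updated_at < :time
    #      AND (instances.host = :host AND networks.multi_host
    #           OR networks.host = :host)
    # followed by an UPDATE that clears instance_id and leased for the
    # collected ids.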
host_filter = or_(and_(models.Instance.host == host, models.Network.multi_host == True), models.Network.host == host) result = session.query(models.FixedIp.id).\ filter(models.FixedIp.deleted == False).\ filter(models.FixedIp.allocated == False).\ filter(models.FixedIp.updated_at < time).\ join((models.Network, models.Network.id == models.FixedIp.network_id)).\ join((models.Instance, models.Instance.id == models.FixedIp.instance_id)).\ filter(host_filter).\ all() fixed_ip_ids = [fip[0] for fip in result] if not fixed_ip_ids: return 0 result = model_query(context, models.FixedIp, session=session).\ filter(models.FixedIp.id.in_(fixed_ip_ids)).\ update({'instance_id': None, 'leased': False, 'updated_at': utils.utcnow()}, synchronize_session='fetch') return result @require_context def fixed_ip_get(context, id, session=None): result = model_query(context, models.FixedIp, session=session).\ filter_by(id=id).\ first() if not result: raise exception.FixedIpNotFound(id=id) # FIXME(sirp): shouldn't we just use project_only here to restrict the # results? if is_user_context(context) and result['instance_id'] is not None: instance = instance_get(context, result['instance_id'], session) authorize_project_context(context, instance.project_id) return result @require_admin_context def fixed_ip_get_all(context, session=None): result = model_query(context, models.FixedIp, session=session, read_deleted="yes").\ all() if not result: raise exception.NoFixedIpsDefined() return result @require_context def fixed_ip_get_by_address(context, address, session=None): result = model_query(context, models.FixedIp, session=session, read_deleted="yes").\ filter_by(address=address).\ first() if not result: raise exception.FixedIpNotFoundForAddress(address=address) # NOTE(sirp): shouldn't we just use project_only here to restrict the # results? if is_user_context(context) and result['instance_id'] is not None: instance = instance_get(context, result['instance_id'], session) authorize_project_context(context, instance.project_id) return result @require_context def fixed_ip_get_by_instance(context, instance_id): result = model_query(context, models.FixedIp, read_deleted="no").\ filter_by(instance_id=instance_id).\ all() if not result: raise exception.FixedIpNotFoundForInstance(instance_id=instance_id) return result @require_context def fixed_ip_get_by_network_host(context, network_id, host): result = model_query(context, models.FixedIp, read_deleted="no").\ filter_by(network_id=network_id).\ filter_by(host=host).\ first() if not result: raise exception.FixedIpNotFoundForNetworkHost(network_id=network_id, host=host) return result @require_context def fixed_ips_by_virtual_interface(context, vif_id): result = model_query(context, models.FixedIp, read_deleted="no").\ filter_by(virtual_interface_id=vif_id).\ all() return result @require_admin_context def fixed_ip_get_network(context, address): fixed_ip_ref = fixed_ip_get_by_address(context, address) return fixed_ip_ref.network @require_context def fixed_ip_update(context, address, values): session = get_session() with session.begin(): fixed_ip_ref = fixed_ip_get_by_address(context, address, session=session) fixed_ip_ref.update(values) fixed_ip_ref.save(session=session) ################### @require_context def virtual_interface_create(context, values): """Create a new virtual interface record in the database. 
:param values: = dict containing column values """ try: vif_ref = models.VirtualInterface() vif_ref.update(values) vif_ref.save() except IntegrityError: raise exception.VirtualInterfaceCreateException() return vif_ref @require_context def _virtual_interface_query(context, session=None): return model_query(context, models.VirtualInterface, session=session, read_deleted="yes") @require_context def virtual_interface_get(context, vif_id, session=None): """Gets a virtual interface from the table. :param vif_id: = id of the virtual interface """ vif_ref = _virtual_interface_query(context, session=session).\ filter_by(id=vif_id).\ first() return vif_ref @require_context def virtual_interface_get_by_address(context, address): """Gets a virtual interface from the table. :param address: = the address of the interface you're looking to get """ vif_ref = _virtual_interface_query(context).\ filter_by(address=address).\ first() return vif_ref @require_context def virtual_interface_get_by_uuid(context, vif_uuid): """Gets a virtual interface from the table. :param vif_uuid: the uuid of the interface you're looking to get """ vif_ref = _virtual_interface_query(context).\ filter_by(uuid=vif_uuid).\ first() return vif_ref @require_context @require_instance_exists def virtual_interface_get_by_instance(context, instance_id): """Gets all virtual interfaces for instance. :param instance_id: = id of the instance to retrieve vifs for """ vif_refs = _virtual_interface_query(context).\ filter_by(instance_id=instance_id).\ all() return vif_refs @require_context def virtual_interface_get_by_instance_and_network(context, instance_id, network_id): """Gets virtual interface for instance that's associated with network.""" vif_ref = _virtual_interface_query(context).\ filter_by(instance_id=instance_id).\ filter_by(network_id=network_id).\ first() return vif_ref @require_context def virtual_interface_delete(context, vif_id): """Delete virtual interface record from the database. :param vif_id: = id of vif to delete """ session = get_session() vif_ref = virtual_interface_get(context, vif_id, session) with session.begin(): session.delete(vif_ref) @require_context def virtual_interface_delete_by_instance(context, instance_id): """Delete virtual interface records that are associated with the instance given by instance_id. :param instance_id: = id of instance """ vif_refs = virtual_interface_get_by_instance(context, instance_id) for vif_ref in vif_refs: virtual_interface_delete(context, vif_ref['id']) @require_context def virtual_interface_get_all(context): """Get all vifs""" vif_refs = _virtual_interface_query(context).all() return vif_refs ################### def _metadata_refs(metadata_dict, meta_class): metadata_refs = [] if metadata_dict: for k, v in metadata_dict.iteritems(): metadata_ref = meta_class() metadata_ref['key'] = k metadata_ref['value'] = v metadata_refs.append(metadata_ref) return metadata_refs @require_context def instance_create(context, values): """Create a new Instance record in the database. context - request context object values - dict containing column values. 
""" values = values.copy() values['metadata'] = _metadata_refs(values.get('metadata'), models.InstanceMetadata) instance_ref = models.Instance() if not values.get('uuid'): values['uuid'] = str(utils.gen_uuid()) instance_ref.update(values) session = get_session() with session.begin(): instance_ref.save(session=session) # and creat the info_cache table entry for instance instance_info_cache_create(context, {'instance_id': instance_ref['uuid']}) return instance_ref @require_admin_context def instance_data_get_for_project(context, project_id): result = model_query(context, func.count(models.Instance.id), func.sum(models.Instance.vcpus), func.sum(models.Instance.memory_mb), read_deleted="no").\ filter_by(project_id=project_id).\ first() # NOTE(vish): convert None to 0 return (result[0] or 0, result[1] or 0, result[2] or 0) @require_context def instance_destroy(context, instance_id): session = get_session() with session.begin(): if utils.is_uuid_like(instance_id): instance_ref = instance_get_by_uuid(context, instance_id, session=session) instance_id = instance_ref['id'] else: instance_ref = instance_get(context, instance_id, session=session) session.query(models.Instance).\ filter_by(id=instance_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupInstanceAssociation).\ filter_by(instance_id=instance_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.InstanceMetadata).\ filter_by(instance_id=instance_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.BlockDeviceMapping).\ filter_by(instance_id=instance_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) instance_info_cache_delete(context, instance_ref['uuid'], session=session) return instance_ref @require_context def instance_get_by_uuid(context, uuid, session=None): result = _build_instance_get(context, session=session).\ filter_by(uuid=uuid).\ first() if not result: raise exception.InstanceNotFound(instance_id=uuid) return result @require_context def instance_get(context, instance_id, session=None): result = _build_instance_get(context, session=session).\ filter_by(id=instance_id).\ first() if not result: raise exception.InstanceNotFound(instance_id=instance_id) return result @require_context def _build_instance_get(context, session=None): return model_query(context, models.Instance, session=session, project_only=True).\ options(joinedload_all('security_groups.rules')).\ options(joinedload('info_cache')).\ options(joinedload('volumes')).\ options(joinedload('metadata')).\ options(joinedload('instance_type')) @require_admin_context def instance_get_all(context): return model_query(context, models.Instance).\ options(joinedload('info_cache')).\ options(joinedload('security_groups')).\ options(joinedload('metadata')).\ options(joinedload('instance_type')).\ all() @require_context def instance_get_all_by_filters(context, filters, sort_key, sort_dir): """Return instances that match all filters. 
Deleted instances will be returned by default, unless there's a filter that says otherwise""" def _regexp_filter_by_metadata(instance, meta): inst_metadata = [{node['key']: node['value']} for node in instance['metadata']] if isinstance(meta, list): for node in meta: if node not in inst_metadata: return False elif isinstance(meta, dict): for k, v in meta.iteritems(): if {k: v} not in inst_metadata: return False return True def _regexp_filter_by_column(instance, filter_name, filter_re): try: v = getattr(instance, filter_name) except AttributeError: return True if v and filter_re.match(str(v)): return True return False sort_fn = {'desc': desc, 'asc': asc} session = get_session() query_prefix = session.query(models.Instance).\ options(joinedload('info_cache')).\ options(joinedload('security_groups')).\ options(joinedload('metadata')).\ options(joinedload('instance_type')).\ order_by(sort_fn[sort_dir](getattr(models.Instance, sort_key))) # Make a copy of the filters dictionary to use going forward, as we'll # be modifying it and we shouldn't affect the caller's use of it. filters = filters.copy() if 'changes-since' in filters: changes_since = utils.normalize_time(filters['changes-since']) query_prefix = query_prefix.\ filter(models.Instance.updated_at > changes_since) if 'deleted' in filters: # Instances can be soft or hard deleted and the query needs to # include or exclude both if filters.pop('deleted'): deleted = or_(models.Instance.deleted == True, models.Instance.vm_state == vm_states.SOFT_DELETE) query_prefix = query_prefix.filter(deleted) else: query_prefix = query_prefix.\ filter_by(deleted=False).\ filter(models.Instance.vm_state != vm_states.SOFT_DELETE) if not context.is_admin: # If we're not admin context, add appropriate filter.. if context.project_id: filters['project_id'] = context.project_id else: filters['user_id'] = context.user_id # Filters for exact matches that we can do along with the SQL query... # For other filters that don't match this, we will do regexp matching exact_match_filter_names = ['project_id', 'user_id', 'image_ref', 'vm_state', 'instance_type_id', 'uuid'] # Filter the query query_prefix = exact_filter(query_prefix, models.Instance, filters, exact_match_filter_names) instances = query_prefix.all() if not instances: return [] # Now filter on everything else for regexp matching.. # For filters not in the list, we'll attempt to use the filter_name # as a column name in Instance.. 
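    # Illustrative example (editorial, not original code): a leftover
    # filter such as {'display_name': '^web-'} is compiled to a regexp
    # below and matched against each instance's display_name column via
    # _regexp_filter_by_column(), while a 'metadata' filter is matched
    # key/value-wise by _regexp_filter_by_metadata() defined above.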
regexp_filter_funcs = {} for filter_name in filters.iterkeys(): filter_func = regexp_filter_funcs.get(filter_name, None) filter_re = re.compile(str(filters[filter_name])) if filter_func: filter_l = lambda instance: filter_func(instance, filter_re) elif filter_name == 'metadata': filter_l = lambda instance: _regexp_filter_by_metadata(instance, filters[filter_name]) else: filter_l = lambda instance: _regexp_filter_by_column(instance, filter_name, filter_re) instances = filter(filter_l, instances) if not instances: break return instances @require_context def instance_get_active_by_window(context, begin, end=None, project_id=None): """Return instances that were active during window.""" session = get_session() query = session.query(models.Instance) query = query.filter(or_(models.Instance.terminated_at == None, models.Instance.terminated_at > begin)) if end: query = query.filter(models.Instance.launched_at < end) if project_id: query = query.filter_by(project_id=project_id) return query.all() @require_admin_context def instance_get_active_by_window_joined(context, begin, end=None, project_id=None): """Return instances and joins that were active during window.""" session = get_session() query = session.query(models.Instance) query = query.options(joinedload('info_cache')).\ options(joinedload('security_groups')).\ options(joinedload('metadata')).\ options(joinedload('instance_type')).\ filter(or_(models.Instance.terminated_at == None, models.Instance.terminated_at > begin)) if end: query = query.filter(models.Instance.launched_at < end) if project_id: query = query.filter_by(project_id=project_id) return query.all() @require_admin_context def _instance_get_all_query(context, project_only=False): return model_query(context, models.Instance, project_only=project_only).\ options(joinedload('info_cache')).\ options(joinedload('security_groups')).\ options(joinedload('metadata')).\ options(joinedload('instance_type')) @require_admin_context def instance_get_all_by_host(context, host): return _instance_get_all_query(context).filter_by(host=host).all() @require_context def instance_get_all_by_project(context, project_id): authorize_project_context(context, project_id) return _instance_get_all_query(context).\ filter_by(project_id=project_id).\ all() @require_context def instance_get_all_by_reservation(context, reservation_id): return _instance_get_all_query(context, project_only=True).\ filter_by(reservation_id=reservation_id).\ all() # NOTE(jkoelker) This is only being left here for compat with floating # ips. Currently the network_api doesn't return floaters # in network_info. Once it starts return the model. 
This # function and its call in compute/manager.py on 1829 can # go away @require_context def instance_get_floating_address(context, instance_id): fixed_ips = fixed_ip_get_by_instance(context, instance_id) if not fixed_ips: return None # NOTE(tr3buchet): this only gets the first fixed_ip # won't find floating ips associated with other fixed_ips floating_ips = floating_ip_get_by_fixed_address(context, fixed_ips[0]['address']) if not floating_ips: return None # NOTE(vish): this just returns the first floating ip return floating_ips[0]['address'] @require_admin_context def instance_get_all_hung_in_rebooting(context, reboot_window, session=None): reboot_window = datetime.datetime.utcnow() - datetime.timedelta( seconds=reboot_window) if not session: session = get_session() results = session.query(models.Instance).\ filter(models.Instance.updated_at <= reboot_window).\ filter_by(task_state="rebooting").all() return results @require_context def instance_test_and_set(context, instance_id, attr, ok_states, new_state, session=None): """Atomically check if an instance is in a valid state, and if it is, set the instance into a new state. """ if not session: session = get_session() with session.begin(): query = model_query(context, models.Instance, session=session, project_only=True) if utils.is_uuid_like(instance_id): query = query.filter_by(uuid=instance_id) else: query = query.filter_by(id=instance_id) # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues instance = query.with_lockmode('update').first() state = instance[attr] if state not in ok_states: raise exception.InstanceInvalidState( attr=attr, instance_uuid=instance['uuid'], state=state, method='instance_test_and_set') instance[attr] = new_state instance.save(session=session) @require_context def instance_update(context, instance_id, values): session = get_session() if utils.is_uuid_like(instance_id): instance_ref = instance_get_by_uuid(context, instance_id, session=session) else: instance_ref = instance_get(context, instance_id, session=session) metadata = values.get('metadata') if metadata is not None: instance_metadata_update(context, instance_ref['id'], values.pop('metadata'), delete=True) with session.begin(): instance_ref.update(values) instance_ref.save(session=session) return instance_ref def instance_add_security_group(context, instance_uuid, security_group_id): """Associate the given security group with the given instance""" session = get_session() with session.begin(): instance_ref = instance_get_by_uuid(context, instance_uuid, session=session) security_group_ref = security_group_get(context, security_group_id, session=session) instance_ref.security_groups += [security_group_ref] instance_ref.save(session=session) @require_context def instance_remove_security_group(context, instance_uuid, security_group_id): """Disassociate the given security group from the given instance""" session = get_session() instance_ref = instance_get_by_uuid(context, instance_uuid, session=session) session.query(models.SecurityGroupInstanceAssociation).\ filter_by(instance_id=instance_ref['id']).\ filter_by(security_group_id=security_group_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def instance_action_create(context, values): """Create an instance action from the values dictionary.""" action_ref = models.InstanceActions() action_ref.update(values) session = get_session() with session.begin(): action_ref.save(session=session) return 
action_ref @require_admin_context def instance_get_actions(context, instance_uuid): """Return the actions associated to the given instance id""" session = get_session() return session.query(models.InstanceActions).\ filter_by(instance_uuid=instance_uuid).\ all() @require_context def instance_get_id_to_uuid_mapping(context, ids): session = get_session() instances = session.query(models.Instance).\ filter(models.Instance.id.in_(ids)).\ all() mapping = {} for instance in instances: mapping[instance['id']] = instance['uuid'] return mapping ################### @require_context def instance_info_cache_create(context, values): """Create a new instance cache record in the table. :param context: = request context object :param values: = dict containing column values """ info_cache = models.InstanceInfoCache() info_cache.update(values) session = get_session() with session.begin(): info_cache.save(session=session) return info_cache @require_context def instance_info_cache_get(context, instance_uuid, session=None): """Gets an instance info cache from the table. :param instance_uuid: = uuid of the info cache's instance :param session: = optional session object """ session = session or get_session() info_cache = session.query(models.InstanceInfoCache).\ filter_by(instance_id=instance_uuid).\ first() return info_cache @require_context def instance_info_cache_update(context, instance_uuid, values, session=None): """Update an instance info cache record in the table. :param instance_uuid: = uuid of info cache's instance :param values: = dict containing column values to update :param session: = optional session object """ session = session or get_session() info_cache = instance_info_cache_get(context, instance_uuid, session=session) if info_cache: info_cache.update(values) info_cache.save(session=session) else: # NOTE(tr3buchet): just in case someone blows away an instance's # cache entry values['instance_id'] = instance_uuid info_cache = instance_info_cache_create(context, values) return info_cache @require_context def instance_info_cache_delete(context, instance_uuid, session=None): """Deletes an existing instance_info_cache record :param instance_uuid: = uuid of the instance tied to the cache record :param session: = optional session object """ values = {'deleted': True, 'deleted_at': utils.utcnow()} instance_info_cache_update(context, instance_uuid, values, session) ################### @require_context def key_pair_create(context, values): key_pair_ref = models.KeyPair() key_pair_ref.update(values) key_pair_ref.save() return key_pair_ref @require_context def key_pair_destroy(context, user_id, name): authorize_user_context(context, user_id) session = get_session() with session.begin(): key_pair_ref = key_pair_get(context, user_id, name, session=session) key_pair_ref.delete(session=session) @require_context def key_pair_destroy_all_by_user(context, user_id): authorize_user_context(context, user_id) session = get_session() with session.begin(): session.query(models.KeyPair).\ filter_by(user_id=user_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def key_pair_get(context, user_id, name, session=None): authorize_user_context(context, user_id) result = model_query(context, models.KeyPair, session=session).\ filter_by(user_id=user_id).\ filter_by(name=name).\ first() if not result: raise exception.KeypairNotFound(user_id=user_id, name=name) return result @require_context def key_pair_get_all_by_user(context, user_id): 
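    # Illustrative usage (editorial note): key_pair_get_all_by_user(ctxt,
    # ctxt.user_id) returns every non-deleted KeyPair row for that user;
    # non-admin callers may only query their own user_id.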
authorize_user_context(context, user_id) return model_query(context, models.KeyPair, read_deleted="no").\ filter_by(user_id=user_id).\ all() ################### @require_admin_context def network_associate(context, project_id, force=False): """Associate a project with a network. called by project_get_networks under certain conditions and network manager add_network_to_project() only associate if the project doesn't already have a network or if force is True force solves race condition where a fresh project has multiple instance builds simultaneously picked up by multiple network hosts which attempt to associate the project with multiple networks force should only be used as a direct consequence of user request all automated requests should not use force """ session = get_session() with session.begin(): def network_query(project_filter): return model_query(context, models.Network, session=session, read_deleted="no").\ filter_by(project_id=project_filter).\ with_lockmode('update').\ first() if not force: # find out if project has a network network_ref = network_query(project_id) if force or not network_ref: # in force mode or project doesn't have a network so associate # with a new network # get new network network_ref = network_query(None) if not network_ref: raise db.NoMoreNetworks() # associate with network # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues network_ref['project_id'] = project_id session.add(network_ref) return network_ref @require_admin_context def network_count(context): return model_query(context, models.Network).count() @require_admin_context def _network_ips_query(context, network_id): return model_query(context, models.FixedIp, read_deleted="no").\ filter_by(network_id=network_id) @require_admin_context def network_count_reserved_ips(context, network_id): return _network_ips_query(context, network_id).\ filter_by(reserved=True).\ count() @require_admin_context def network_create_safe(context, values): if values.get('vlan'): if model_query(context, models.Network, read_deleted="no")\ .filter_by(vlan=values['vlan'])\ .first(): raise exception.DuplicateVlan(vlan=values['vlan']) network_ref = models.Network() network_ref['uuid'] = str(utils.gen_uuid()) network_ref.update(values) try: network_ref.save() return network_ref except IntegrityError: return None @require_admin_context def network_delete_safe(context, network_id): session = get_session() with session.begin(): result = session.query(models.FixedIp).\ filter_by(network_id=network_id).\ filter_by(deleted=False).\ filter_by(allocated=True).\ all() if result: raise exception.NetworkInUse(network_id=network_id) network_ref = network_get(context, network_id=network_id, session=session) session.query(models.FixedIp).\ filter_by(network_id=network_id).\ filter_by(deleted=False).\ update({'deleted': True, 'updated_at': literal_column('updated_at'), 'deleted_at': utils.utcnow()}) session.delete(network_ref) @require_admin_context def network_disassociate(context, network_id): network_update(context, network_id, {'project_id': None, 'host': None}) @require_context def network_get(context, network_id, session=None): result = model_query(context, models.Network, session=session, project_only=True).\ filter_by(id=network_id).\ first() if not result: raise exception.NetworkNotFound(network_id=network_id) return result @require_admin_context def network_get_all(context): result = model_query(context, models.Network, read_deleted="no").all() if not result: raise 
exception.NoNetworksFound() return result @require_admin_context def network_get_all_by_uuids(context, network_uuids, project_id=None): project_or_none = or_(models.Network.project_id == project_id, models.Network.project_id == None) result = model_query(context, models.Network, read_deleted="no").\ filter(models.Network.uuid.in_(network_uuids)).\ filter(project_or_none).\ all() if not result: raise exception.NoNetworksFound() #check if host is set to all of the networks # returned in the result for network in result: if network['host'] is None: raise exception.NetworkHostNotSet(network_id=network['id']) #check if the result contains all the networks #we are looking for for network_uuid in network_uuids: found = False for network in result: if network['uuid'] == network_uuid: found = True break if not found: if project_id: raise exception.NetworkNotFoundForProject( network_uuid=network_uuid, project_id=context.project_id) raise exception.NetworkNotFound(network_id=network_uuid) return result # NOTE(vish): pylint complains because of the long method name, but # it fits with the names of the rest of the methods # pylint: disable=C0103 @require_admin_context def network_get_associated_fixed_ips(context, network_id, host=None): # FIXME(sirp): since this returns fixed_ips, this would be better named # fixed_ip_get_all_by_network. # NOTE(vish): The ugly joins here are to solve a performance issue and # should be removed once we can add and remove leases # without regenerating the whole list vif_and = and_(models.VirtualInterface.id == models.FixedIp.virtual_interface_id, models.VirtualInterface.deleted == False) inst_and = and_(models.Instance.id == models.FixedIp.instance_id, models.Instance.deleted == False) session = get_session() query = session.query(models.FixedIp.address, models.FixedIp.instance_id, models.FixedIp.network_id, models.FixedIp.virtual_interface_id, models.VirtualInterface.address, models.Instance.hostname, models.Instance.updated_at, models.Instance.created_at).\ filter(models.FixedIp.deleted == False).\ filter(models.FixedIp.network_id == network_id).\ filter(models.FixedIp.allocated == True).\ join((models.VirtualInterface, vif_and)).\ join((models.Instance, inst_and)).\ filter(models.FixedIp.instance_id != None).\ filter(models.FixedIp.virtual_interface_id != None) if host: query = query.filter(models.Instance.host == host) result = query.all() data = [] for datum in result: cleaned = {} cleaned['address'] = datum[0] cleaned['instance_id'] = datum[1] cleaned['network_id'] = datum[2] cleaned['vif_id'] = datum[3] cleaned['vif_address'] = datum[4] cleaned['instance_hostname'] = datum[5] cleaned['instance_updated'] = datum[6] cleaned['instance_created'] = datum[7] data.append(cleaned) return data @require_admin_context def _network_get_query(context, session=None): return model_query(context, models.Network, session=session, read_deleted="no") @require_admin_context def network_get_by_bridge(context, bridge): result = _network_get_query(context).filter_by(bridge=bridge).first() if not result: raise exception.NetworkNotFoundForBridge(bridge=bridge) return result @require_admin_context def network_get_by_uuid(context, uuid): result = _network_get_query(context).filter_by(uuid=uuid).first() if not result: raise exception.NetworkNotFoundForUUID(uuid=uuid) return result @require_admin_context def network_get_by_cidr(context, cidr): result = _network_get_query(context).\ filter(or_(models.Network.cidr == cidr, models.Network.cidr_v6 == cidr)).\ first() if not result: raise 
exception.NetworkNotFoundForCidr(cidr=cidr) return result @require_admin_context def network_get_by_instance(context, instance_id): # note this uses fixed IP to get to instance # only works for networks the instance has an IP from result = _network_get_query(context).\ filter_by(instance_id=instance_id).\ first() if not result: raise exception.NetworkNotFoundForInstance(instance_id=instance_id) return result @require_admin_context def network_get_all_by_instance(context, instance_id): result = _network_get_query(context).\ filter_by(instance_id=instance_id).\ all() if not result: raise exception.NetworkNotFoundForInstance(instance_id=instance_id) return result @require_admin_context def network_get_all_by_host(context, host): session = get_session() fixed_ip_query = model_query(context, models.FixedIp.network_id, session=session).\ filter(models.FixedIp.host == host) # NOTE(vish): return networks that have host set # or that have a fixed ip with host set host_filter = or_(models.Network.host == host, models.Network.id.in_(fixed_ip_query.subquery())) return _network_get_query(context, session=session).\ filter(host_filter).\ all() @require_admin_context def network_set_host(context, network_id, host_id): session = get_session() with session.begin(): network_ref = _network_get_query(context, session=session).\ filter_by(id=network_id).\ with_lockmode('update').\ first() if not network_ref: raise exception.NetworkNotFound(network_id=network_id) # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues if not network_ref['host']: network_ref['host'] = host_id session.add(network_ref) return network_ref['host'] @require_context def network_update(context, network_id, values): session = get_session() with session.begin(): network_ref = network_get(context, network_id, session=session) network_ref.update(values) network_ref.save(session=session) return network_ref ################### def queue_get_for(context, topic, physical_node_id): # FIXME(ja): this should be servername? 
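    # e.g. queue_get_for(context, 'compute', 'host1') -> 'compute.host1'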
return "%s.%s" % (topic, physical_node_id) ################### @require_admin_context def iscsi_target_count_by_host(context, host): return model_query(context, models.IscsiTarget).\ filter_by(host=host).\ count() @require_admin_context def iscsi_target_create_safe(context, values): iscsi_target_ref = models.IscsiTarget() for (key, value) in values.iteritems(): iscsi_target_ref[key] = value try: iscsi_target_ref.save() return iscsi_target_ref except IntegrityError: return None ################### @require_admin_context def auth_token_destroy(context, token_id): session = get_session() with session.begin(): token_ref = auth_token_get(context, token_id, session=session) token_ref.delete(session=session) @require_admin_context def auth_token_get(context, token_hash, session=None): result = model_query(context, models.AuthToken, session=session).\ filter_by(token_hash=token_hash).\ first() if not result: raise exception.AuthTokenNotFound(token=token_hash) return result @require_admin_context def auth_token_update(context, token_hash, values): session = get_session() with session.begin(): token_ref = auth_token_get(context, token_hash, session=session) token_ref.update(values) token_ref.save(session=session) @require_admin_context def auth_token_create(context, token): tk = models.AuthToken() tk.update(token) tk.save() return tk ################### @require_context def quota_get(context, project_id, resource, session=None): result = model_query(context, models.Quota, session=session, read_deleted="no").\ filter_by(project_id=project_id).\ filter_by(resource=resource).\ first() if not result: raise exception.ProjectQuotaNotFound(project_id=project_id) return result @require_context def quota_get_all_by_project(context, project_id): authorize_project_context(context, project_id) rows = model_query(context, models.Quota, read_deleted="no").\ filter_by(project_id=project_id).\ all() result = {'project_id': project_id} for row in rows: result[row.resource] = row.hard_limit return result @require_admin_context def quota_create(context, project_id, resource, limit): quota_ref = models.Quota() quota_ref.project_id = project_id quota_ref.resource = resource quota_ref.hard_limit = limit quota_ref.save() return quota_ref @require_admin_context def quota_update(context, project_id, resource, limit): session = get_session() with session.begin(): quota_ref = quota_get(context, project_id, resource, session=session) quota_ref.hard_limit = limit quota_ref.save(session=session) @require_admin_context def quota_destroy(context, project_id, resource): session = get_session() with session.begin(): quota_ref = quota_get(context, project_id, resource, session=session) quota_ref.delete(session=session) @require_admin_context def quota_destroy_all_by_project(context, project_id): session = get_session() with session.begin(): quotas = model_query(context, models.Quota, session=session, read_deleted="no").\ filter_by(project_id=project_id).\ all() for quota_ref in quotas: quota_ref.delete(session=session) ################### @require_context def quota_class_get(context, class_name, resource, session=None): result = model_query(context, models.QuotaClass, session=session, read_deleted="no").\ filter_by(class_name=class_name).\ filter_by(resource=resource).\ first() if not result: raise exception.QuotaClassNotFound(class_name=class_name) return result @require_context def quota_class_get_all_by_name(context, class_name): authorize_quota_class_context(context, class_name) rows = model_query(context, models.QuotaClass, 
read_deleted="no").\ filter_by(class_name=class_name).\ all() result = {'class_name': class_name} for row in rows: result[row.resource] = row.hard_limit return result @require_admin_context def quota_class_create(context, class_name, resource, limit): quota_class_ref = models.QuotaClass() quota_class_ref.class_name = class_name quota_class_ref.resource = resource quota_class_ref.hard_limit = limit quota_class_ref.save() return quota_class_ref @require_admin_context def quota_class_update(context, class_name, resource, limit): session = get_session() with session.begin(): quota_class_ref = quota_class_get(context, class_name, resource, session=session) quota_class_ref.hard_limit = limit quota_class_ref.save(session=session) @require_admin_context def quota_class_destroy(context, class_name, resource): session = get_session() with session.begin(): quota_class_ref = quota_class_get(context, class_name, resource, session=session) quota_class_ref.delete(session=session) @require_admin_context def quota_class_destroy_all_by_name(context, class_name): session = get_session() with session.begin(): quota_classes = model_query(context, models.QuotaClass, session=session, read_deleted="no").\ filter_by(class_name=class_name).\ all() for quota_class_ref in quota_classes: quota_class_ref.delete(session=session) ################### @require_admin_context def volume_allocate_iscsi_target(context, volume_id, host): session = get_session() with session.begin(): iscsi_target_ref = model_query(context, models.IscsiTarget, session=session, read_deleted="no").\ filter_by(volume=None).\ filter_by(host=host).\ with_lockmode('update').\ first() # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues if not iscsi_target_ref: raise db.NoMoreTargets() iscsi_target_ref.volume_id = volume_id session.add(iscsi_target_ref) return iscsi_target_ref.target_num @require_admin_context def volume_attached(context, volume_id, instance_id, mountpoint): session = get_session() with session.begin(): volume_ref = volume_get(context, volume_id, session=session) volume_ref['status'] = 'in-use' volume_ref['mountpoint'] = mountpoint volume_ref['attach_status'] = 'attached' volume_ref.instance = instance_get(context, instance_id, session=session) volume_ref.save(session=session) @require_context def volume_create(context, values): values['volume_metadata'] = _metadata_refs(values.get('metadata'), models.VolumeMetadata) volume_ref = models.Volume() volume_ref.update(values) session = get_session() with session.begin(): volume_ref.save(session=session) return volume_ref @require_admin_context def volume_data_get_for_project(context, project_id): result = model_query(context, func.count(models.Volume.id), func.sum(models.Volume.size), read_deleted="no").\ filter_by(project_id=project_id).\ first() # NOTE(vish): convert None to 0 return (result[0] or 0, result[1] or 0) @require_admin_context def volume_destroy(context, volume_id): session = get_session() with session.begin(): session.query(models.Volume).\ filter_by(id=volume_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.IscsiTarget).\ filter_by(volume_id=volume_id).\ update({'volume_id': None}) session.query(models.VolumeMetadata).\ filter_by(volume_id=volume_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_admin_context def volume_detached(context, volume_id): session = get_session() with 
session.begin(): volume_ref = volume_get(context, volume_id, session=session) volume_ref['status'] = 'available' volume_ref['mountpoint'] = None volume_ref['attach_status'] = 'detached' volume_ref.instance = None volume_ref.save(session=session) @require_context def _volume_get_query(context, session=None, project_only=False): return model_query(context, models.Volume, session=session, project_only=project_only).\ options(joinedload('instance')).\ options(joinedload('volume_metadata')).\ options(joinedload('volume_type')) @require_context def volume_get(context, volume_id, session=None): result = _volume_get_query(context, session=session, project_only=True).\ filter_by(id=volume_id).\ first() if not result: raise exception.VolumeNotFound(volume_id=volume_id) return result @require_admin_context def volume_get_all(context): return _volume_get_query(context).all() @require_admin_context def volume_get_all_by_host(context, host): return _volume_get_query(context).filter_by(host=host).all() @require_admin_context def volume_get_all_by_instance(context, instance_id): result = model_query(context, models.Volume, read_deleted="no").\ options(joinedload('volume_metadata')).\ options(joinedload('volume_type')).\ filter_by(instance_id=instance_id).\ all() if not result: raise exception.VolumeNotFoundForInstance(instance_id=instance_id) return result @require_context def volume_get_all_by_project(context, project_id): authorize_project_context(context, project_id) return _volume_get_query(context).filter_by(project_id=project_id).all() @require_admin_context def volume_get_instance(context, volume_id): result = _volume_get_query(context).filter_by(id=volume_id).first() if not result: raise exception.VolumeNotFound(volume_id=volume_id) return result.instance @require_admin_context def volume_get_iscsi_target_num(context, volume_id): result = model_query(context, models.IscsiTarget, read_deleted="yes").\ filter_by(volume_id=volume_id).\ first() if not result: raise exception.ISCSITargetNotFoundForVolume(volume_id=volume_id) return result.target_num @require_context def volume_update(context, volume_id, values): session = get_session() metadata = values.get('metadata') if metadata is not None: volume_metadata_update(context, volume_id, values.pop('metadata'), delete=True) with session.begin(): volume_ref = volume_get(context, volume_id, session=session) volume_ref.update(values) volume_ref.save(session=session) #################### def _volume_metadata_get_query(context, volume_id, session=None): return model_query(context, models.VolumeMetadata, session=session, read_deleted="no").\ filter_by(volume_id=volume_id) @require_context @require_volume_exists def volume_metadata_get(context, volume_id): rows = _volume_metadata_get_query(context, volume_id).all() result = {} for row in rows: result[row['key']] = row['value'] return result @require_context @require_volume_exists def volume_metadata_delete(context, volume_id, key): _volume_metadata_get_query(context, volume_id).\ filter_by(key=key).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context @require_volume_exists def volume_metadata_get_item(context, volume_id, key, session=None): result = _volume_metadata_get_query(context, volume_id, session=session).\ filter_by(key=key).\ first() if not result: raise exception.VolumeMetadataNotFound(metadata_key=key, volume_id=volume_id) return result @require_context @require_volume_exists def volume_metadata_update(context, volume_id, metadata, 
delete): session = get_session() # Set existing metadata to deleted if delete argument is True if delete: original_metadata = volume_metadata_get(context, volume_id) for meta_key, meta_value in original_metadata.iteritems(): if meta_key not in metadata: meta_ref = volume_metadata_get_item(context, volume_id, meta_key, session) meta_ref.update({'deleted': True}) meta_ref.save(session=session) meta_ref = None # Now update all existing items with new values, or create new meta objects for meta_key, meta_value in metadata.iteritems(): # update the value whether it exists or not item = {"value": meta_value} try: meta_ref = volume_metadata_get_item(context, volume_id, meta_key, session) except exception.VolumeMetadataNotFound, e: meta_ref = models.VolumeMetadata() item.update({"key": meta_key, "volume_id": volume_id}) meta_ref.update(item) meta_ref.save(session=session) return metadata ################### @require_context def snapshot_create(context, values): snapshot_ref = models.Snapshot() snapshot_ref.update(values) session = get_session() with session.begin(): snapshot_ref.save(session=session) return snapshot_ref @require_admin_context def snapshot_destroy(context, snapshot_id): session = get_session() with session.begin(): session.query(models.Snapshot).\ filter_by(id=snapshot_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def snapshot_get(context, snapshot_id, session=None): result = model_query(context, models.Snapshot, session=session, project_only=True).\ filter_by(id=snapshot_id).\ first() if not result: raise exception.SnapshotNotFound(snapshot_id=snapshot_id) return result @require_admin_context def snapshot_get_all(context): return model_query(context, models.Snapshot).all() @require_context def snapshot_get_all_for_volume(context, volume_id): return model_query(context, models.Snapshot, read_deleted='no', project_only=True).\ filter_by(volume_id=volume_id).all() @require_context def snapshot_get_all_by_project(context, project_id): authorize_project_context(context, project_id) return model_query(context, models.Snapshot).\ filter_by(project_id=project_id).\ all() @require_context def snapshot_update(context, snapshot_id, values): session = get_session() with session.begin(): snapshot_ref = snapshot_get(context, snapshot_id, session=session) snapshot_ref.update(values) snapshot_ref.save(session=session) ################### def _block_device_mapping_get_query(context, session=None): return model_query(context, models.BlockDeviceMapping, session=session, read_deleted="no") @require_context def block_device_mapping_create(context, values): bdm_ref = models.BlockDeviceMapping() bdm_ref.update(values) session = get_session() with session.begin(): bdm_ref.save(session=session) @require_context def block_device_mapping_update(context, bdm_id, values): session = get_session() with session.begin(): _block_device_mapping_get_query(context, session=session).\ filter_by(id=bdm_id).\ update(values) @require_context def block_device_mapping_update_or_create(context, values): session = get_session() with session.begin(): result = _block_device_mapping_get_query(context, session=session).\ filter_by(instance_id=values['instance_id']).\ filter_by(device_name=values['device_name']).\ first() if not result: bdm_ref = models.BlockDeviceMapping() bdm_ref.update(values) bdm_ref.save(session=session) else: result.update(values) # NOTE(yamahata): same virtual device name can be specified multiple # times. 
So delete the existing ones. virtual_name = values['virtual_name'] if (virtual_name is not None and block_device.is_swap_or_ephemeral(virtual_name)): session.query(models.BlockDeviceMapping).\ filter_by(instance_id=values['instance_id']).\ filter_by(virtual_name=virtual_name).\ filter(models.BlockDeviceMapping.device_name != values['device_name']).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def block_device_mapping_get_all_by_instance(context, instance_id): return _block_device_mapping_get_query(context).\ filter_by(instance_id=instance_id).\ all() @require_context def block_device_mapping_destroy(context, bdm_id): session = get_session() with session.begin(): session.query(models.BlockDeviceMapping).\ filter_by(id=bdm_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def block_device_mapping_destroy_by_instance_and_volume(context, instance_id, volume_id): session = get_session() with session.begin(): _block_device_mapping_get_query(context, session=session).\ filter_by(instance_id=instance_id).\ filter_by(volume_id=volume_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) ################### def _security_group_get_query(context, session=None, read_deleted=None, project_only=False): return model_query(context, models.SecurityGroup, session=session, read_deleted=read_deleted, project_only=project_only).\ options(joinedload_all('rules')) @require_context def security_group_get_all(context): return _security_group_get_query(context).all() @require_context def security_group_get(context, security_group_id, session=None): result = _security_group_get_query(context, session=session, project_only=True).\ filter_by(id=security_group_id).\ options(joinedload_all('instances')).\ first() if not result: raise exception.SecurityGroupNotFound( security_group_id=security_group_id) return result @require_context def security_group_get_by_name(context, project_id, group_name): result = _security_group_get_query(context, read_deleted="no").\ filter_by(project_id=project_id).\ filter_by(name=group_name).\ options(joinedload_all('instances')).\ first() if not result: raise exception.SecurityGroupNotFoundForProject( project_id=project_id, security_group_id=group_name) return result @require_context def security_group_get_by_project(context, project_id): return _security_group_get_query(context, read_deleted="no").\ filter_by(project_id=project_id).\ all() @require_context def security_group_get_by_instance(context, instance_id): return _security_group_get_query(context, read_deleted="no").\ join(models.SecurityGroup.instances).\ filter_by(id=instance_id).\ all() @require_context def security_group_exists(context, project_id, group_name): try: group = security_group_get_by_name(context, project_id, group_name) return group is not None except exception.NotFound: return False @require_context def security_group_in_use(context, group_id): session = get_session() with session.begin(): # Are there any instances that haven't been deleted # that include this group? 
inst_assoc = session.query(models.SecurityGroupInstanceAssociation).\ filter_by(security_group_id=group_id).\ filter_by(deleted=False).\ all() for ia in inst_assoc: num_instances = session.query(models.Instance).\ filter_by(deleted=False).\ filter_by(id=ia.instance_id).\ count() if num_instances: return True return False @require_context def security_group_create(context, values): security_group_ref = models.SecurityGroup() # FIXME(devcamcar): Unless I do this, rules fails with lazy load exception # once save() is called. This will get cleaned up in next orm pass. security_group_ref.rules security_group_ref.update(values) security_group_ref.save() return security_group_ref @require_context def security_group_destroy(context, security_group_id): session = get_session() with session.begin(): session.query(models.SecurityGroup).\ filter_by(id=security_group_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupInstanceAssociation).\ filter_by(security_group_id=security_group_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupIngressRule).\ filter_by(group_id=security_group_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def security_group_count_by_project(context, project_id): authorize_project_context(context, project_id) return model_query(context, models.SecurityGroup, read_deleted="no").\ filter_by(project_id=project_id).\ count() ################### def _security_group_rule_get_query(context, session=None): return model_query(context, models.SecurityGroupIngressRule, session=session) @require_context def security_group_rule_get(context, security_group_rule_id, session=None): result = _security_group_rule_get_query(context, session=session).\ filter_by(id=security_group_rule_id).\ first() if not result: raise exception.SecurityGroupNotFoundForRule( rule_id=security_group_rule_id) return result @require_context def security_group_rule_get_by_security_group(context, security_group_id, session=None): return _security_group_rule_get_query(context, session=session).\ filter_by(parent_group_id=security_group_id).\ options(joinedload_all('grantee_group.instances')).\ all() @require_context def security_group_rule_get_by_security_group_grantee(context, security_group_id, session=None): return _security_group_rule_get_query(context, session=session).\ filter_by(group_id=security_group_id).\ all() @require_context def security_group_rule_create(context, values): security_group_rule_ref = models.SecurityGroupIngressRule() security_group_rule_ref.update(values) security_group_rule_ref.save() return security_group_rule_ref @require_context def security_group_rule_destroy(context, security_group_rule_id): session = get_session() with session.begin(): security_group_rule = security_group_rule_get(context, security_group_rule_id, session=session) security_group_rule.delete(session=session) @require_context def security_group_rule_count_by_group(context, security_group_id): return model_query(context, models.SecurityGroupIngressRule, read_deleted="no").\ filter_by(parent_group_id=security_group_id).\ count() ################### @require_admin_context def provider_fw_rule_create(context, rule): fw_rule_ref = models.ProviderFirewallRule() fw_rule_ref.update(rule) fw_rule_ref.save() return fw_rule_ref @require_admin_context def
provider_fw_rule_get_all(context): return model_query(context, models.ProviderFirewallRule).all() @require_admin_context def provider_fw_rule_destroy(context, rule_id): session = get_session() with session.begin(): session.query(models.ProviderFirewallRule).\ filter_by(id=rule_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) ################### @require_admin_context def user_get(context, id, session=None): result = model_query(context, models.User, session=session).\ filter_by(id=id).\ first() if not result: raise exception.UserNotFound(user_id=id) return result @require_admin_context def user_get_by_access_key(context, access_key, session=None): result = model_query(context, models.User, session=session).\ filter_by(access_key=access_key).\ first() if not result: raise exception.AccessKeyNotFound(access_key=access_key) return result @require_admin_context def user_create(context, values): user_ref = models.User() user_ref.update(values) user_ref.save() return user_ref @require_admin_context def user_delete(context, id): session = get_session() with session.begin(): session.query(models.UserProjectAssociation).\ filter_by(user_id=id).\ delete() session.query(models.UserRoleAssociation).\ filter_by(user_id=id).\ delete() session.query(models.UserProjectRoleAssociation).\ filter_by(user_id=id).\ delete() user_ref = user_get(context, id, session=session) session.delete(user_ref) def user_get_all(context): return model_query(context, models.User).all() def user_get_roles(context, user_id): session = get_session() with session.begin(): user_ref = user_get(context, user_id, session=session) return [role.role for role in user_ref['roles']] def user_get_roles_for_project(context, user_id, project_id): session = get_session() with session.begin(): res = session.query(models.UserProjectRoleAssociation).\ filter_by(user_id=user_id).\ filter_by(project_id=project_id).\ all() return [association.role for association in res] def user_remove_project_role(context, user_id, project_id, role): session = get_session() with session.begin(): session.query(models.UserProjectRoleAssociation).\ filter_by(user_id=user_id).\ filter_by(project_id=project_id).\ filter_by(role=role).\ delete() def user_remove_role(context, user_id, role): session = get_session() with session.begin(): res = session.query(models.UserRoleAssociation).\ filter_by(user_id=user_id).\ filter_by(role=role).\ all() for role in res: session.delete(role) def user_add_role(context, user_id, role): session = get_session() with session.begin(): user_ref = user_get(context, user_id, session=session) models.UserRoleAssociation(user=user_ref, role=role).\ save(session=session) def user_add_project_role(context, user_id, project_id, role): session = get_session() with session.begin(): user_ref = user_get(context, user_id, session=session) project_ref = project_get(context, project_id, session=session) models.UserProjectRoleAssociation(user_id=user_ref['id'], project_id=project_ref['id'], role=role).save(session=session) def user_update(context, user_id, values): session = get_session() with session.begin(): user_ref = user_get(context, user_id, session=session) user_ref.update(values) user_ref.save(session=session) ################### def project_create(context, values): project_ref = models.Project() project_ref.update(values) project_ref.save() return project_ref def project_add_member(context, project_id, user_id): session = get_session() with session.begin(): project_ref = 
project_get(context, project_id, session=session) user_ref = user_get(context, user_id, session=session) project_ref.members += [user_ref] project_ref.save(session=session) def project_get(context, id, session=None): result = model_query(context, models.Project, session=session, read_deleted="no").\ filter_by(id=id).\ options(joinedload_all('members')).\ first() if not result: raise exception.ProjectNotFound(project_id=id) return result def project_get_all(context): return model_query(context, models.Project).\ options(joinedload_all('members')).\ all() def project_get_by_user(context, user_id): user = model_query(context, models.User).\ filter_by(id=user_id).\ options(joinedload_all('projects')).\ first() if not user: raise exception.UserNotFound(user_id=user_id) return user.projects def project_remove_member(context, project_id, user_id): session = get_session() project = project_get(context, project_id, session=session) user = user_get(context, user_id, session=session) if user in project.members: project.members.remove(user) project.save(session=session) def project_update(context, project_id, values): session = get_session() with session.begin(): project_ref = project_get(context, project_id, session=session) project_ref.update(values) project_ref.save(session=session) def project_delete(context, id): session = get_session() with session.begin(): session.query(models.UserProjectAssociation).\ filter_by(project_id=id).\ delete() session.query(models.UserProjectRoleAssociation).\ filter_by(project_id=id).\ delete() project_ref = project_get(context, id, session=session) session.delete(project_ref) @require_context def project_get_networks(context, project_id, associate=True): # NOTE(tr3buchet): as before this function will associate # a project with a network if it doesn't have one and # associate is true result = model_query(context, models.Network, read_deleted="no").\ filter_by(project_id=project_id).\ all() if not result: if not associate: return [] return [network_associate(context, project_id)] return result ################### @require_admin_context def migration_create(context, values): migration = models.Migration() migration.update(values) migration.save() return migration @require_admin_context def migration_update(context, id, values): session = get_session() with session.begin(): migration = migration_get(context, id, session=session) migration.update(values) migration.save(session=session) return migration @require_admin_context def migration_get(context, id, session=None): result = model_query(context, models.Migration, session=session, read_deleted="yes").\ filter_by(id=id).\ first() if not result: raise exception.MigrationNotFound(migration_id=id) return result @require_admin_context def migration_get_by_instance_and_status(context, instance_uuid, status): result = model_query(context, models.Migration, read_deleted="yes").\ filter_by(instance_uuid=instance_uuid).\ filter_by(status=status).\ first() if not result: raise exception.MigrationNotFoundByStatus(instance_id=instance_uuid, status=status) return result @require_admin_context def migration_get_all_unconfirmed(context, confirm_window, session=None): confirm_window = datetime.datetime.utcnow() - datetime.timedelta( seconds=confirm_window) return model_query(context, models.Migration, session=session, read_deleted="yes").\ filter(models.Migration.updated_at <= confirm_window).\ filter_by(status="finished").\ all() ################## def console_pool_create(context, values): pool = models.ConsolePool() 
pool.update(values) pool.save() return pool def console_pool_get(context, pool_id): result = model_query(context, models.ConsolePool, read_deleted="no").\ filter_by(id=pool_id).\ first() if not result: raise exception.ConsolePoolNotFound(pool_id=pool_id) return result def console_pool_get_by_host_type(context, compute_host, host, console_type): result = model_query(context, models.ConsolePool, read_deleted="no").\ filter_by(host=host).\ filter_by(console_type=console_type).\ filter_by(compute_host=compute_host).\ options(joinedload('consoles')).\ first() if not result: raise exception.ConsolePoolNotFoundForHostType( host=host, console_type=console_type, compute_host=compute_host) return result def console_pool_get_all_by_host_type(context, host, console_type): return model_query(context, models.ConsolePool, read_deleted="no").\ filter_by(host=host).\ filter_by(console_type=console_type).\ options(joinedload('consoles')).\ all() def console_create(context, values): console = models.Console() console.update(values) console.save() return console def console_delete(context, console_id): session = get_session() with session.begin(): # NOTE(mdragon): consoles are meant to be transient. session.query(models.Console).\ filter_by(id=console_id).\ delete() def console_get_by_pool_instance(context, pool_id, instance_id): result = model_query(context, models.Console, read_deleted="yes").\ filter_by(pool_id=pool_id).\ filter_by(instance_id=instance_id).\ options(joinedload('pool')).\ first() if not result: raise exception.ConsoleNotFoundInPoolForInstance( pool_id=pool_id, instance_id=instance_id) return result def console_get_all_by_instance(context, instance_id): return model_query(context, models.Console, read_deleted="yes").\ filter_by(instance_id=instance_id).\ all() def console_get(context, console_id, instance_id=None): query = model_query(context, models.Console, read_deleted="yes").\ filter_by(id=console_id).\ options(joinedload('pool')) if instance_id is not None: query = query.filter_by(instance_id=instance_id) result = query.first() if not result: if instance_id: raise exception.ConsoleNotFoundForInstance( console_id=console_id, instance_id=instance_id) else: raise exception.ConsoleNotFound(console_id=console_id) return result ################## @require_admin_context def instance_type_create(context, values): """Create a new instance type. 
In order to pass in extra specs, the values dict should contain an 'extra_specs' key/value pair: {'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}} """ session = get_session() with session.begin(): try: instance_type_get_by_name(context, values['name'], session) raise exception.InstanceTypeExists(name=values['name']) except exception.InstanceTypeNotFoundByName: pass try: instance_type_get_by_flavor_id(context, values['flavorid'], session) raise exception.InstanceTypeExists(name=values['name']) except exception.FlavorNotFound: pass try: specs = values.get('extra_specs') specs_refs = [] if specs: for k, v in specs.iteritems(): specs_ref = models.InstanceTypeExtraSpecs() specs_ref['key'] = k specs_ref['value'] = v specs_refs.append(specs_ref) values['extra_specs'] = specs_refs instance_type_ref = models.InstanceTypes() instance_type_ref.update(values) instance_type_ref.save(session=session) except Exception, e: raise exception.DBError(e) return _dict_with_extra_specs(instance_type_ref) def _dict_with_extra_specs(inst_type_query): """Takes an instance, volume, or instance type query returned by sqlalchemy and returns it as a dictionary, converting the extra_specs entry from a list of dicts: 'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...] to a single dict: 'extra_specs' : {'k1': 'v1'} """ inst_type_dict = dict(inst_type_query) extra_specs = dict([(x['key'], x['value']) for x in inst_type_query['extra_specs']]) inst_type_dict['extra_specs'] = extra_specs return inst_type_dict def _instance_type_get_query(context, session=None, read_deleted=None): return model_query(context, models.InstanceTypes, session=session, read_deleted=read_deleted).\ options(joinedload('extra_specs')) @require_context def instance_type_get_all(context, inactive=False, filters=None): """ Returns all instance types.
""" filters = filters or {} read_deleted = "yes" if inactive else "no" query = _instance_type_get_query(context, read_deleted=read_deleted) if 'min_memory_mb' in filters: query = query.filter( models.InstanceTypes.memory_mb >= filters['min_memory_mb']) if 'min_root_gb' in filters: query = query.filter( models.InstanceTypes.root_gb >= filters['min_root_gb']) inst_types = query.order_by("name").all() return [_dict_with_extra_specs(i) for i in inst_types] @require_context def instance_type_get(context, id, session=None): """Returns a dict describing specific instance_type""" result = _instance_type_get_query(context, session=session).\ filter_by(id=id).\ first() if not result: raise exception.InstanceTypeNotFound(instance_type_id=id) return _dict_with_extra_specs(result) @require_context def instance_type_get_by_name(context, name, session=None): """Returns a dict describing specific instance_type""" result = _instance_type_get_query(context, session=session).\ filter_by(name=name).\ first() if not result: raise exception.InstanceTypeNotFoundByName(instance_type_name=name) return _dict_with_extra_specs(result) @require_context def instance_type_get_by_flavor_id(context, flavor_id, session=None): """Returns a dict describing specific flavor_id""" result = _instance_type_get_query(context, session=session).\ filter_by(flavorid=flavor_id).\ first() if not result: raise exception.FlavorNotFound(flavor_id=flavor_id) return _dict_with_extra_specs(result) @require_admin_context def instance_type_destroy(context, name): """Marks specific instance_type as deleted""" session = get_session() with session.begin(): instance_type_ref = instance_type_get_by_name(context, name, session=session) instance_type_id = instance_type_ref['id'] session.query(models.InstanceTypes).\ filter_by(id=instance_type_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.InstanceTypeExtraSpecs).\ filter_by(instance_type_id=instance_type_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) #################### @require_admin_context def cell_create(context, values): cell = models.Cell() cell.update(values) cell.save() return cell def _cell_get_by_id_query(context, cell_id, session=None): return model_query(context, models.Cell, session=session).\ filter_by(id=cell_id) @require_admin_context def cell_update(context, cell_id, values): cell = cell_get(context, cell_id) cell.update(values) cell.save() return cell @require_admin_context def cell_delete(context, cell_id): session = get_session() with session.begin(): _cell_get_by_id_query(context, cell_id, session=session).\ delete() @require_admin_context def cell_get(context, cell_id): result = _cell_get_by_id_query(context, cell_id).first() if not result: raise exception.CellNotFound(cell_id=cell_id) return result @require_admin_context def cell_get_all(context): return model_query(context, models.Cell, read_deleted="no").all() #################### def _instance_metadata_get_query(context, instance_id, session=None): return model_query(context, models.InstanceMetadata, session=session, read_deleted="no").\ filter_by(instance_id=instance_id) @require_context @require_instance_exists def instance_metadata_get(context, instance_id): rows = _instance_metadata_get_query(context, instance_id).all() result = {} for row in rows: result[row['key']] = row['value'] return result @require_context @require_instance_exists def instance_metadata_delete(context, 
instance_id, key): _instance_metadata_get_query(context, instance_id).\ filter_by(key=key).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context @require_instance_exists def instance_metadata_get_item(context, instance_id, key, session=None): result = _instance_metadata_get_query( context, instance_id, session=session).\ filter_by(key=key).\ first() if not result: raise exception.InstanceMetadataNotFound(metadata_key=key, instance_id=instance_id) return result @require_context @require_instance_exists def instance_metadata_update(context, instance_id, metadata, delete): session = get_session() # Set existing metadata to deleted if delete argument is True if delete: original_metadata = instance_metadata_get(context, instance_id) for meta_key, meta_value in original_metadata.iteritems(): if meta_key not in metadata: meta_ref = instance_metadata_get_item(context, instance_id, meta_key, session) meta_ref.update({'deleted': True}) meta_ref.save(session=session) meta_ref = None # Now update all existing items with new values, or create new meta objects for meta_key, meta_value in metadata.iteritems(): # update the value whether it exists or not item = {"value": meta_value} try: meta_ref = instance_metadata_get_item(context, instance_id, meta_key, session) except exception.InstanceMetadataNotFound, e: meta_ref = models.InstanceMetadata() item.update({"key": meta_key, "instance_id": instance_id}) meta_ref.update(item) meta_ref.save(session=session) return metadata #################### @require_admin_context def agent_build_create(context, values): agent_build_ref = models.AgentBuild() agent_build_ref.update(values) agent_build_ref.save() return agent_build_ref @require_admin_context def agent_build_get_by_triple(context, hypervisor, os, architecture, session=None): return model_query(context, models.AgentBuild, session=session, read_deleted="no").\ filter_by(hypervisor=hypervisor).\ filter_by(os=os).\ filter_by(architecture=architecture).\ first() @require_admin_context def agent_build_get_all(context): return model_query(context, models.AgentBuild, read_deleted="no").\ all() @require_admin_context def agent_build_destroy(context, agent_build_id): session = get_session() with session.begin(): model_query(context, models.AgentBuild, session=session, read_deleted="yes").\ filter_by(id=agent_build_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_admin_context def agent_build_update(context, agent_build_id, values): session = get_session() with session.begin(): agent_build_ref = model_query(context, models.AgentBuild, session=session, read_deleted="yes").\ filter_by(id=agent_build_id).\ first() agent_build_ref.update(values) agent_build_ref.save(session=session) #################### @require_context def bw_usage_get_by_macs(context, macs, start_period): return model_query(context, models.BandwidthUsage, read_deleted="yes").\ filter(models.BandwidthUsage.mac.in_(macs)).\ filter_by(start_period=start_period).\ all() @require_context def bw_usage_update(context, mac, start_period, bw_in, bw_out, session=None): if not session: session = get_session() with session.begin(): bwusage = model_query(context, models.BandwidthUsage, session=session, read_deleted="yes").\ filter_by(start_period=start_period).\ filter_by(mac=mac).\ first() if not bwusage: bwusage = models.BandwidthUsage() bwusage.start_period = start_period bwusage.mac = mac bwusage.last_refreshed = utils.utcnow() 
bwusage.bw_in = bw_in bwusage.bw_out = bw_out bwusage.save(session=session) #################### def _instance_type_extra_specs_get_query(context, instance_type_id, session=None): return model_query(context, models.InstanceTypeExtraSpecs, session=session, read_deleted="no").\ filter_by(instance_type_id=instance_type_id) @require_context def instance_type_extra_specs_get(context, instance_type_id): rows = _instance_type_extra_specs_get_query( context, instance_type_id).\ all() result = {} for row in rows: result[row['key']] = row['value'] return result @require_context def instance_type_extra_specs_delete(context, instance_type_id, key): _instance_type_extra_specs_get_query( context, instance_type_id).\ filter_by(key=key).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def instance_type_extra_specs_get_item(context, instance_type_id, key, session=None): result = _instance_type_extra_specs_get_query( context, instance_type_id, session=session).\ filter_by(key=key).\ first() if not result: raise exception.InstanceTypeExtraSpecsNotFound( extra_specs_key=key, instance_type_id=instance_type_id) return result @require_context def instance_type_extra_specs_update_or_create(context, instance_type_id, specs): session = get_session() spec_ref = None for key, value in specs.iteritems(): try: spec_ref = instance_type_extra_specs_get_item( context, instance_type_id, key, session) except exception.InstanceTypeExtraSpecsNotFound, e: spec_ref = models.InstanceTypeExtraSpecs() spec_ref.update({"key": key, "value": value, "instance_type_id": instance_type_id, "deleted": 0}) spec_ref.save(session=session) return specs ################## @require_admin_context def volume_type_create(context, values): """Create a new volume type. In order to pass in extra specs, the values dict should contain an 'extra_specs' key/value pair: {'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}} """ session = get_session() with session.begin(): try: volume_type_get_by_name(context, values['name'], session) raise exception.VolumeTypeExists(name=values['name']) except exception.VolumeTypeNotFoundByName: pass try: specs = values.get('extra_specs') values['extra_specs'] = _metadata_refs(values.get('extra_specs'), models.VolumeTypeExtraSpecs) volume_type_ref = models.VolumeTypes() volume_type_ref.update(values) volume_type_ref.save() except Exception, e: raise exception.DBError(e) return volume_type_ref @require_context def volume_type_get_all(context, inactive=False, filters=None): """ Returns a dict describing all volume_types with name as key.
""" filters = filters or {} read_deleted = "yes" if inactive else "no" rows = model_query(context, models.VolumeTypes, read_deleted=read_deleted).\ options(joinedload('extra_specs')).\ order_by("name").\ all() # TODO(sirp): this patern of converting rows to a result with extra_specs # is repeated quite a bit, might be worth creating a method for it result = {} for row in rows: result[row['name']] = _dict_with_extra_specs(row) return result @require_context def volume_type_get(context, id, session=None): """Returns a dict describing specific volume_type""" result = model_query(context, models.VolumeTypes, session=session).\ options(joinedload('extra_specs')).\ filter_by(id=id).\ first() if not result: raise exception.VolumeTypeNotFound(volume_type=id) return _dict_with_extra_specs(result) @require_context def volume_type_get_by_name(context, name, session=None): """Returns a dict describing specific volume_type""" result = model_query(context, models.VolumeTypes, session=session).\ options(joinedload('extra_specs')).\ filter_by(name=name).\ first() if not result: raise exception.VolumeTypeNotFoundByName(volume_type_name=name) else: return _dict_with_extra_specs(result) @require_admin_context def volume_type_destroy(context, name): session = get_session() with session.begin(): volume_type_ref = volume_type_get_by_name(context, name, session=session) volume_type_id = volume_type_ref['id'] session.query(models.VolumeTypes).\ filter_by(id=volume_type_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.VolumeTypeExtraSpecs).\ filter_by(volume_type_id=volume_type_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) #################### def _volume_type_extra_specs_query(context, volume_type_id, session=None): return model_query(context, models.VolumeTypeExtraSpecs, session=session, read_deleted="no").\ filter_by(volume_type_id=volume_type_id) @require_context def volume_type_extra_specs_get(context, volume_type_id): rows = _volume_type_extra_specs_query(context, volume_type_id).\ all() result = {} for row in rows: result[row['key']] = row['value'] return result @require_context def volume_type_extra_specs_delete(context, volume_type_id, key): _volume_type_extra_specs_query(context, volume_type_id).\ filter_by(key=key).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def volume_type_extra_specs_get_item(context, volume_type_id, key, session=None): result = _volume_type_extra_specs_query( context, volume_type_id, session=session).\ filter_by(key=key).\ first() if not result: raise exception.VolumeTypeExtraSpecsNotFound( extra_specs_key=key, volume_type_id=volume_type_id) return result @require_context def volume_type_extra_specs_update_or_create(context, volume_type_id, specs): session = get_session() spec_ref = None for key, value in specs.iteritems(): try: spec_ref = volume_type_extra_specs_get_item( context, volume_type_id, key, session) except exception.VolumeTypeExtraSpecsNotFound, e: spec_ref = models.VolumeTypeExtraSpecs() spec_ref.update({"key": key, "value": value, "volume_type_id": volume_type_id, "deleted": 0}) spec_ref.save(session=session) return specs #################### def s3_image_get(context, image_id): """Find local s3 image represented by the provided id""" result = model_query(context, models.S3Image, read_deleted="yes").\ filter_by(id=image_id).\ first() if not result: raise 
exception.ImageNotFound(image_id=image_id) return result def s3_image_get_by_uuid(context, image_uuid): """Find local s3 image represented by the provided uuid""" result = model_query(context, models.S3Image, read_deleted="yes").\ filter_by(uuid=image_uuid).\ first() if not result: raise exception.ImageNotFound(image_id=image_uuid) return result def s3_image_create(context, image_uuid): """Create local s3 image represented by provided uuid""" try: s3_image_ref = models.S3Image() s3_image_ref.update({'uuid': image_uuid}) s3_image_ref.save() except Exception, e: raise exception.DBError(e) return s3_image_ref #################### @require_admin_context def sm_backend_conf_create(context, values): backend_conf = models.SMBackendConf() backend_conf.update(values) backend_conf.save() return backend_conf @require_admin_context def sm_backend_conf_update(context, sm_backend_id, values): session = get_session() with session.begin(): backend_conf = model_query(context, models.SMBackendConf, session=session, read_deleted="yes").\ filter_by(id=sm_backend_id).\ first() if not backend_conf: raise exception.NotFound( _("No backend config with id %(sm_backend_id)s") % locals()) backend_conf.update(values) backend_conf.save(session=session) return backend_conf @require_admin_context def sm_backend_conf_delete(context, sm_backend_id): # FIXME(sirp): for consistency, shouldn't this just mark as deleted with # `purge` actually deleting the record? session = get_session() with session.begin(): model_query(context, models.SMBackendConf, session=session, read_deleted="yes").\ filter_by(id=sm_backend_id).\ delete() @require_admin_context def sm_backend_conf_get(context, sm_backend_id): result = model_query(context, models.SMBackendConf, read_deleted="yes").\ filter_by(id=sm_backend_id).\ first() if not result: raise exception.NotFound(_("No backend config with id " "%(sm_backend_id)s") % locals()) return result @require_admin_context def sm_backend_conf_get_by_sr(context, sr_uuid): session = get_session() return model_query(context, models.SMBackendConf, read_deleted="yes").\ filter_by(sr_uuid=sr_uuid).\ first() @require_admin_context def sm_backend_conf_get_all(context): return model_query(context, models.SMBackendConf, read_deleted="yes").\ all() #################### def _sm_flavor_get_query(context, sm_flavor_label, session=None): return model_query(context, models.SMFlavors, session=session, read_deleted="yes").\ filter_by(label=sm_flavor_label) @require_admin_context def sm_flavor_create(context, values): sm_flavor = models.SMFlavors() sm_flavor.update(values) sm_flavor.save() return sm_flavor @require_admin_context def sm_flavor_update(context, sm_flavor_label, values): sm_flavor = sm_flavor_get(context, sm_flavor_label) sm_flavor.update(values) sm_flavor.save() return sm_flavor @require_admin_context def sm_flavor_delete(context, sm_flavor_label): session = get_session() with session.begin(): _sm_flavor_get_query(context, sm_flavor_label).delete() @require_admin_context def sm_flavor_get(context, sm_flavor_label): result = _sm_flavor_get_query(context, sm_flavor_label).first() if not result: raise exception.NotFound( _("No sm_flavor called %(sm_flavor_label)s") % locals()) return result @require_admin_context def sm_flavor_get_all(context): return model_query(context, models.SMFlavors, read_deleted="yes").all() ############################### def _sm_volume_get_query(context, volume_id, session=None): return model_query(context, models.SMVolume, session=session, read_deleted="yes").\ filter_by(id=volume_id) def
sm_volume_create(context, values): sm_volume = models.SMVolume() sm_volume.update(values) sm_volume.save() return sm_volume def sm_volume_update(context, volume_id, values): sm_volume = sm_volume_get(context, volume_id) sm_volume.update(values) sm_volume.save() return sm_volume def sm_volume_delete(context, volume_id): session = get_session() with session.begin(): _sm_volume_get_query(context, volume_id, session=session).delete() def sm_volume_get(context, volume_id): result = _sm_volume_get_query(context, volume_id).first() if not result: raise exception.NotFound( _("No sm_volume with id %(volume_id)s") % locals()) return result def sm_volume_get_all(context): return model_query(context, models.SMVolume, read_deleted="yes").all() ################ def _aggregate_get_query(context, model_class, id_field, id, session=None, read_deleted=None): return model_query(context, model_class, session=session, read_deleted=read_deleted).filter(id_field == id) @require_admin_context def aggregate_create(context, values, metadata=None): session = get_session() aggregate = _aggregate_get_query(context, models.Aggregate, models.Aggregate.name, values['name'], session=session, read_deleted='yes').first() values.setdefault('operational_state', aggregate_states.CREATED) if not aggregate: aggregate = models.Aggregate() aggregate.update(values) aggregate.save(session=session) elif aggregate.deleted: values['deleted'] = False values['deleted_at'] = None aggregate.update(values) aggregate.save(session=session) else: raise exception.AggregateNameExists(aggregate_name=values['name']) if metadata: aggregate_metadata_add(context, aggregate.id, metadata) return aggregate @require_admin_context def aggregate_get(context, aggregate_id): aggregate = _aggregate_get_query(context, models.Aggregate, models.Aggregate.id, aggregate_id).first() if not aggregate: raise exception.AggregateNotFound(aggregate_id=aggregate_id) return aggregate @require_admin_context def aggregate_get_by_host(context, host): aggregate_host = _aggregate_get_query(context, models.AggregateHost, models.AggregateHost.host, host).first() if not aggregate_host: raise exception.AggregateHostNotFound(host=host) return aggregate_get(context, aggregate_host.aggregate_id) @require_admin_context def aggregate_update(context, aggregate_id, values): session = get_session() aggregate = _aggregate_get_query(context, models.Aggregate, models.Aggregate.id, aggregate_id, session=session).first() if aggregate: metadata = values.get('metadata') if metadata is not None: aggregate_metadata_add(context, aggregate_id, values.pop('metadata'), set_delete=True) with session.begin(): aggregate.update(values) aggregate.save(session=session) values['metadata'] = metadata return aggregate else: raise exception.AggregateNotFound(aggregate_id=aggregate_id) @require_admin_context def aggregate_delete(context, aggregate_id): query = _aggregate_get_query(context, models.Aggregate, models.Aggregate.id, aggregate_id) if query.first(): query.update({'deleted': True, 'deleted_at': utils.utcnow(), 'operational_state': aggregate_states.DISMISSED, 'updated_at': literal_column('updated_at')}) else: raise exception.AggregateNotFound(aggregate_id=aggregate_id) @require_admin_context def aggregate_get_all(context): return model_query(context, models.Aggregate).all() @require_admin_context @require_aggregate_exists def aggregate_metadata_get(context, aggregate_id): rows = model_query(context, models.AggregateMetadata).\ filter_by(aggregate_id=aggregate_id).all() return dict([(r['key'], r['value']) 
for r in rows]) @require_admin_context @require_aggregate_exists def aggregate_metadata_delete(context, aggregate_id, key): query = _aggregate_get_query(context, models.AggregateMetadata, models.AggregateMetadata.aggregate_id, aggregate_id).\ filter_by(key=key) if query.first(): query.update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) else: raise exception.AggregateMetadataNotFound(aggregate_id=aggregate_id, metadata_key=key) @require_admin_context @require_aggregate_exists def aggregate_metadata_get_item(context, aggregate_id, key, session=None): result = _aggregate_get_query(context, models.AggregateMetadata, models.AggregateMetadata.aggregate_id, aggregate_id, session=session, read_deleted='yes').\ filter_by(key=key).first() if not result: raise exception.AggregateMetadataNotFound(metadata_key=key, aggregate_id=aggregate_id) return result @require_admin_context @require_aggregate_exists def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False): session = get_session() if set_delete: original_metadata = aggregate_metadata_get(context, aggregate_id) for meta_key, meta_value in original_metadata.iteritems(): if meta_key not in metadata: meta_ref = aggregate_metadata_get_item(context, aggregate_id, meta_key, session) meta_ref.update({'deleted': True}) meta_ref.save(session=session) meta_ref = None for meta_key, meta_value in metadata.iteritems(): item = {"value": meta_value} try: meta_ref = aggregate_metadata_get_item(context, aggregate_id, meta_key, session) if meta_ref.deleted: item.update({'deleted': False, 'deleted_at': None}) except exception.AggregateMetadataNotFound: meta_ref = models.AggregateMetadata() item.update({"key": meta_key, "aggregate_id": aggregate_id}) meta_ref.update(item) meta_ref.save(session=session) return metadata @require_admin_context @require_aggregate_exists def aggregate_host_get_all(context, aggregate_id): rows = model_query(context, models.AggregateHost).\ filter_by(aggregate_id=aggregate_id).all() return [r.host for r in rows] @require_admin_context @require_aggregate_exists def aggregate_host_delete(context, aggregate_id, host): query = _aggregate_get_query(context, models.AggregateHost, models.AggregateHost.aggregate_id, aggregate_id).filter_by(host=host) if query.first(): query.update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) else: raise exception.AggregateHostNotFound(aggregate_id=aggregate_id, host=host) @require_admin_context @require_aggregate_exists def aggregate_host_add(context, aggregate_id, host): session = get_session() host_ref = _aggregate_get_query(context, models.AggregateHost, models.AggregateHost.aggregate_id, aggregate_id, session=session, read_deleted='yes').\ filter_by(host=host).first() if not host_ref: try: host_ref = models.AggregateHost() values = {"host": host, "aggregate_id": aggregate_id, } host_ref.update(values) host_ref.save(session=session) except exception.DBError: raise exception.AggregateHostConflict(host=host) elif host_ref.deleted: host_ref.update({'deleted': False, 'deleted_at': None}) host_ref.save(session=session) else: raise exception.AggregateHostExists(host=host, aggregate_id=aggregate_id) return host_ref ################ def instance_fault_create(context, values): """Create a new InstanceFault.""" fault_ref = models.InstanceFault() fault_ref.update(values) fault_ref.save() return dict(fault_ref.iteritems()) def instance_fault_get_by_instance_uuids(context, instance_uuids): """Get all instance 
faults for the provided instance_uuids.""" rows = model_query(context, models.InstanceFault, read_deleted='no').\ filter(models.InstanceFault.instance_uuid.in_( instance_uuids)).\ order_by(desc("created_at")).\ all() output = {} for instance_uuid in instance_uuids: output[instance_uuid] = [] for row in rows: data = dict(row.iteritems()) output[row['instance_uuid']].append(data) return output
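# The metadata helpers above -- volume_metadata_update() and its twin
# instance_metadata_update() -- share a contract worth spelling out: when the
# delete argument is true, stored keys missing from the new dict are
# soft-deleted, so the call replaces the metadata wholesale; when it is false,
# the new dict is merged over what is already stored. A minimal sketch,
# assuming an admin context 'ctxt' and a volume whose stored metadata is
# {'a': '1', 'b': '2'} (names and values here are illustrative placeholders):
volume_metadata_update(ctxt, volume_id, {'a': '9'}, False)
# stored metadata is now {'a': '9', 'b': '2'} -- merge, 'b' survives
volume_metadata_update(ctxt, volume_id, {'a': '9'}, True)
# stored metadata is now {'a': '9'} -- 'b' is marked deleted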
./CrossVul/dataset_final_sorted/CWE-264/py/good_3632_3
crossvul-python_data_bad_3691_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid import routes from keystone import catalog from keystone import exception from keystone import identity from keystone import policy from keystone import token from keystone.common import logging from keystone.common import utils from keystone.common import wsgi LOG = logging.getLogger(__name__) class AdminRouter(wsgi.ComposingRouter): def __init__(self): mapper = routes.Mapper() version_controller = VersionController('admin') mapper.connect('/', controller=version_controller, action='get_version') # Token Operations auth_controller = TokenController() mapper.connect('/tokens', controller=auth_controller, action='authenticate', conditions=dict(method=['POST'])) mapper.connect('/tokens/{token_id}', controller=auth_controller, action='validate_token', conditions=dict(method=['GET'])) mapper.connect('/tokens/{token_id}', controller=auth_controller, action='validate_token_head', conditions=dict(method=['HEAD'])) mapper.connect('/tokens/{token_id}', controller=auth_controller, action='delete_token', conditions=dict(method=['DELETE'])) mapper.connect('/tokens/{token_id}/endpoints', controller=auth_controller, action='endpoints', conditions=dict(method=['GET'])) # Miscellaneous Operations extensions_controller = AdminExtensionsController() mapper.connect('/extensions', controller=extensions_controller, action='get_extensions_info', conditions=dict(method=['GET'])) mapper.connect('/extensions/{extension_alias}', controller=extensions_controller, action='get_extension_info', conditions=dict(method=['GET'])) identity_router = identity.AdminRouter() routers = [identity_router] super(AdminRouter, self).__init__(mapper, routers) class PublicRouter(wsgi.ComposingRouter): def __init__(self): mapper = routes.Mapper() version_controller = VersionController('public') mapper.connect('/', controller=version_controller, action='get_version') # Token Operations auth_controller = TokenController() mapper.connect('/tokens', controller=auth_controller, action='authenticate', conditions=dict(method=['POST'])) # Miscellaneous extensions_controller = PublicExtensionsController() mapper.connect('/extensions', controller=extensions_controller, action='get_extensions_info', conditions=dict(method=['GET'])) mapper.connect('/extensions/{extension_alias}', controller=extensions_controller, action='get_extension_info', conditions=dict(method=['GET'])) identity_router = identity.PublicRouter() routers = [identity_router] super(PublicRouter, self).__init__(mapper, routers) class PublicVersionRouter(wsgi.ComposingRouter): def __init__(self): mapper = routes.Mapper() version_controller = VersionController('public') mapper.connect('/', controller=version_controller, action='get_versions') routers = [] super(PublicVersionRouter, self).__init__(mapper, routers) class AdminVersionRouter(wsgi.ComposingRouter): def __init__(self): mapper = routes.Mapper() version_controller = VersionController('admin') 
mapper.connect('/', controller=version_controller, action='get_versions') routers = [] super(AdminVersionRouter, self).__init__(mapper, routers) class VersionController(wsgi.Application): def __init__(self, version_type): self.catalog_api = catalog.Manager() self.url_key = "%sURL" % version_type super(VersionController, self).__init__() def _get_identity_url(self, context): catalog_ref = self.catalog_api.get_catalog( context=context, user_id=None, tenant_id=None) for region, region_ref in catalog_ref.iteritems(): for service, service_ref in region_ref.iteritems(): if service == 'identity': return service_ref[self.url_key] raise exception.NotImplemented() def _get_versions_list(self, context): """The list of versions is dependent on the context.""" identity_url = self._get_identity_url(context) if not identity_url.endswith('/'): identity_url = identity_url + '/' versions = {} versions['v2.0'] = { "id": "v2.0", "status": "beta", "updated": "2011-11-19T00:00:00Z", "links": [ { "rel": "self", "href": identity_url, }, { "rel": "describedby", "type": "text/html", "href": "http://docs.openstack.org/api/openstack-" "identity-service/2.0/content/" }, { "rel": "describedby", "type": "application/pdf", "href": "http://docs.openstack.org/api/openstack-" "identity-service/2.0/identity-dev-guide-" "2.0.pdf" } ], "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.identity-v2.0" "+json" }, { "base": "application/xml", "type": "application/vnd.openstack.identity-v2.0" "+xml" } ] } return versions def get_versions(self, context): versions = self._get_versions_list(context) return wsgi.render_response(status=(300, 'Multiple Choices'), body={ "versions": { "values": versions.values() } }) def get_version(self, context): versions = self._get_versions_list(context) return wsgi.render_response(body={ "version": versions['v2.0'] }) class NoopController(wsgi.Application): def __init__(self): super(NoopController, self).__init__() def noop(self, context): return {} class TokenController(wsgi.Application): def __init__(self): self.catalog_api = catalog.Manager() self.identity_api = identity.Manager() self.token_api = token.Manager() self.policy_api = policy.Manager() super(TokenController, self).__init__() def authenticate(self, context, auth=None): """Authenticate credentials and return a token. Accept auth as a dict that looks like:: { "auth":{ "passwordCredentials":{ "username":"test_user", "password":"mypass" }, "tenantName":"customer-x" } } In this case, tenant is optional; if not provided, the token will be considered "unscoped" and can later be used to get a scoped token. Alternatively, this call accepts auth with only a token and tenant that will return a token that is scoped to that tenant.
""" token_id = uuid.uuid4().hex if 'passwordCredentials' in auth: username = auth['passwordCredentials'].get('username', '') password = auth['passwordCredentials'].get('password', '') tenant_name = auth.get('tenantName', None) user_id = auth['passwordCredentials'].get('userId', None) if username: user_ref = self.identity_api.get_user_by_name( context=context, user_name=username) if user_ref: user_id = user_ref['id'] # more compat tenant_id = auth.get('tenantId', None) if tenant_name: tenant_ref = self.identity_api.get_tenant_by_name( context=context, tenant_name=tenant_name) if tenant_ref: tenant_id = tenant_ref['id'] try: auth_info = self.identity_api.authenticate(context=context, user_id=user_id, password=password, tenant_id=tenant_id) (user_ref, tenant_ref, metadata_ref) = auth_info # If the user is disabled don't allow them to authenticate if not user_ref.get('enabled', True): LOG.warning('User %s is disabled' % user_id) raise exception.Unauthorized() except AssertionError as e: raise exception.Unauthorized(e.message) token_ref = self.token_api.create_token( context, token_id, dict(id=token_id, user=user_ref, tenant=tenant_ref, metadata=metadata_ref)) if tenant_ref: catalog_ref = self.catalog_api.get_catalog( context=context, user_id=user_ref['id'], tenant_id=tenant_ref['id'], metadata=metadata_ref) else: catalog_ref = {} elif 'token' in auth: token = auth['token'].get('id', None) tenant_name = auth.get('tenantName') # more compat if tenant_name: tenant_ref = self.identity_api.get_tenant_by_name( context=context, tenant_name=tenant_name) tenant_id = tenant_ref['id'] else: tenant_id = auth.get('tenantId', None) try: old_token_ref = self.token_api.get_token(context=context, token_id=token) except exception.NotFound: raise exception.Unauthorized() user_ref = old_token_ref['user'] # If the user is disabled don't allow them to authenticate current_user_ref = self.identity_api.get_user( context=context, user_id=user_ref['id']) if not current_user_ref.get('enabled', True): LOG.warning('User %s is disabled' % user_ref['id']) raise exception.Unauthorized() tenants = self.identity_api.get_tenants_for_user(context, user_ref['id']) if tenant_id: assert tenant_id in tenants tenant_ref = self.identity_api.get_tenant(context=context, tenant_id=tenant_id) if tenant_ref: metadata_ref = self.identity_api.get_metadata( context=context, user_id=user_ref['id'], tenant_id=tenant_ref['id']) catalog_ref = self.catalog_api.get_catalog( context=context, user_id=user_ref['id'], tenant_id=tenant_ref['id'], metadata=metadata_ref) else: metadata_ref = {} catalog_ref = {} token_ref = self.token_api.create_token( context, token_id, dict(id=token_id, user=user_ref, tenant=tenant_ref, metadata=metadata_ref)) # TODO(termie): optimize this call at some point and put it into the # the return for metadata # fill out the roles in the metadata roles_ref = [] for role_id in metadata_ref.get('roles', []): roles_ref.append(self.identity_api.get_role(context, role_id)) logging.debug('TOKEN_REF %s', token_ref) return self._format_authenticate(token_ref, roles_ref, catalog_ref) def _get_token_ref(self, context, token_id, belongs_to=None): """Returns a token if a valid one exists. Optionally, limited to a token owned by a specific tenant. 
""" # TODO(termie): this stuff should probably be moved to middleware self.assert_admin(context) token_ref = self.token_api.get_token(context=context, token_id=token_id) if belongs_to: assert token_ref['tenant']['id'] == belongs_to return token_ref # admin only def validate_token_head(self, context, token_id): """Check that a token is valid. Optionally, also ensure that it is owned by a specific tenant. Identical to ``validate_token``, except does not return a response. """ belongs_to = context['query_string'].get("belongsTo") assert self._get_token_ref(context, token_id, belongs_to) # admin only def validate_token(self, context, token_id): """Check that a token is valid. Optionally, also ensure that it is owned by a specific tenant. Returns metadata about the token along any associated roles. """ belongs_to = context['query_string'].get("belongsTo") token_ref = self._get_token_ref(context, token_id, belongs_to) # TODO(termie): optimize this call at some point and put it into the # the return for metadata # fill out the roles in the metadata metadata_ref = token_ref['metadata'] roles_ref = [] for role_id in metadata_ref.get('roles', []): roles_ref.append(self.identity_api.get_role(context, role_id)) # Get a service catalog if belongs_to is not none # This is needed for on-behalf-of requests catalog_ref = None if belongs_to is not None: catalog_ref = self.catalog_api.get_catalog( context=context, user_id=token_ref['user']['id'], tenant_id=token_ref['tenant']['id'], metadata=metadata_ref) return self._format_token(token_ref, roles_ref, catalog_ref) def delete_token(self, context, token_id): """Delete a token, effectively invalidating it for authz.""" # TODO(termie): this stuff should probably be moved to middleware self.assert_admin(context) self.token_api.delete_token(context=context, token_id=token_id) def endpoints(self, context, token_id): """Return a list of endpoints available to the token.""" raise exception.NotImplemented() def _format_authenticate(self, token_ref, roles_ref, catalog_ref): o = self._format_token(token_ref, roles_ref) o['access']['serviceCatalog'] = self._format_catalog(catalog_ref) return o def _format_token(self, token_ref, roles_ref, catalog_ref=None): user_ref = token_ref['user'] metadata_ref = token_ref['metadata'] expires = token_ref['expires'] if expires is not None: expires = utils.isotime(expires) o = {'access': {'token': {'id': token_ref['id'], 'expires': expires, }, 'user': {'id': user_ref['id'], 'name': user_ref['name'], 'username': user_ref['name'], 'roles': roles_ref, 'roles_links': metadata_ref.get('roles_links', []) } } } if 'tenant' in token_ref and token_ref['tenant']: token_ref['tenant']['enabled'] = True o['access']['token']['tenant'] = token_ref['tenant'] if catalog_ref is not None: o['access']['serviceCatalog'] = self._format_catalog(catalog_ref) return o def _format_catalog(self, catalog_ref): """Munge catalogs from internal to output format Internal catalogs look like: {$REGION: { {$SERVICE: { $key1: $value1, ... } } } The legacy api wants them to look like [{'name': $SERVICE[name], 'type': $SERVICE, 'endpoints': [{ 'tenantId': $tenant_id, ... 
'region': $REGION, }], 'endpoints_links': [], }] """ if not catalog_ref: return {} services = {} for region, region_ref in catalog_ref.iteritems(): for service, service_ref in region_ref.iteritems(): new_service_ref = services.get(service, {}) new_service_ref['name'] = service_ref.pop('name') new_service_ref['type'] = service new_service_ref['endpoints_links'] = [] service_ref['region'] = region endpoints_ref = new_service_ref.get('endpoints', []) endpoints_ref.append(service_ref) new_service_ref['endpoints'] = endpoints_ref services[service] = new_service_ref return services.values() class ExtensionsController(wsgi.Application): """Base extensions controller to be extended by public and admin API's.""" def __init__(self, extensions=None): super(ExtensionsController, self).__init__() self.extensions = extensions or {} def get_extensions_info(self, context): return {'extensions': {'values': self.extensions.values()}} def get_extension_info(self, context, extension_alias): try: return {'extension': self.extensions[extension_alias]} except KeyError: raise exception.NotFound(target=extension_alias) class PublicExtensionsController(ExtensionsController): pass class AdminExtensionsController(ExtensionsController): def __init__(self, *args, **kwargs): super(AdminExtensionsController, self).__init__(*args, **kwargs) # TODO(dolph): Extensions should obviously provide this information # themselves, but hardcoding it here allows us to match # the API spec in the short term with minimal complexity. self.extensions['OS-KSADM'] = { 'name': 'Openstack Keystone Admin', 'namespace': 'http://docs.openstack.org/identity/api/ext/' 'OS-KSADM/v1.0', 'alias': 'OS-KSADM', 'updated': '2011-08-19T13:25:27-06:00', 'description': 'Openstack extensions to Keystone v2.0 API ' 'enabling Admin Operations.', 'links': [ { 'rel': 'describedby', # TODO(dolph): link needs to be revised after # bug 928059 merges 'type': 'text/html', 'href': ('https://github.com/openstack/' 'identity-api'), } ] } @logging.fail_gracefully def public_app_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) return PublicRouter() @logging.fail_gracefully def admin_app_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) return AdminRouter() @logging.fail_gracefully def public_version_app_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) return PublicVersionRouter() @logging.fail_gracefully def admin_version_app_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) return AdminVersionRouter()
./CrossVul/dataset_final_sorted/CWE-264/py/bad_3691_0
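# A minimal, self-contained sketch of hardening the assert-based checks in
# TokenController above (e.g. `assert token_ref['tenant']['id'] == belongs_to`
# and `assert tenant_id in tenants`): `assert` statements are stripped when
# Python runs with -O, and when they do fire they surface as AssertionError
# (a 500) rather than a 401. The Unauthorized class and dict shapes below are
# illustrative stand-ins for keystone's own, not the project's actual patch.


class Unauthorized(Exception):
    """401-style error; unlike `assert`, it survives `python -O`."""


def check_token_belongs_to(token_ref, belongs_to):
    """Return token_ref only if it is scoped to the tenant `belongs_to`."""
    tenant = token_ref.get('tenant') or {}
    if belongs_to and tenant.get('id') != belongs_to:
        raise Unauthorized('token is not scoped to tenant %s' % belongs_to)
    return token_ref


# usage: a token scoped to t1 passes for t1 and is rejected for t2
token_ref = {'id': 'abc123', 'tenant': {'id': 't1'}}
check_token_belongs_to(token_ref, 't1')
try:
    check_token_belongs_to(token_ref, 't2')
except Unauthorized:
    pass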
crossvul-python_data_good_3633_1
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import webob from nova import db from nova import exception from nova import quota from nova.api.openstack import extensions class QuotaSetsController(object): def _format_quota_set(self, project_id, quota_set): """Convert the quota object to a result dict""" return {'quota_set': { 'id': str(project_id), 'metadata_items': quota_set['metadata_items'], 'injected_file_content_bytes': quota_set['injected_file_content_bytes'], 'volumes': quota_set['volumes'], 'gigabytes': quota_set['gigabytes'], 'ram': quota_set['ram'], 'floating_ips': quota_set['floating_ips'], 'instances': quota_set['instances'], 'injected_files': quota_set['injected_files'], 'cores': quota_set['cores'], 'security_groups': quota_set['security_groups'], 'security_group_rules': quota_set['security_group_rules'], }} def show(self, req, id): context = req.environ['nova.context'] try: db.sqlalchemy.api.authorize_project_context(context, id) return self._format_quota_set(id, quota.get_project_quotas(context, id)) except exception.NotAuthorized: return webob.Response(status_int=403) def update(self, req, id, body): context = req.environ['nova.context'] project_id = id resources = ['metadata_items', 'injected_file_content_bytes', 'volumes', 'gigabytes', 'ram', 'floating_ips', 'instances', 'injected_files', 'cores', 'security_groups', 'security_group_rules'] for key in body['quota_set'].keys(): if key in resources: value = int(body['quota_set'][key]) try: db.quota_update(context, project_id, key, value) except exception.ProjectQuotaNotFound: db.quota_create(context, project_id, key, value) except exception.AdminRequired: return webob.Response(status_int=403) return {'quota_set': quota.get_project_quotas(context, project_id)} def defaults(self, req, id): return self._format_quota_set(id, quota._get_default_quotas()) class Quotas(extensions.ExtensionDescriptor): def get_name(self): return "Quotas" def get_alias(self): return "os-quota-sets" def get_description(self): return "Quotas management support" def get_namespace(self): return "http://docs.openstack.org/ext/quotas-sets/api/v1.1" def get_updated(self): return "2011-08-08T00:00:00+00:00" def get_resources(self): resources = [] res = extensions.ResourceExtension('os-quota-sets', QuotaSetsController(), member_actions={'defaults': 'GET'}) resources.append(res) return resources
./CrossVul/dataset_final_sorted/CWE-264/py/good_3633_1
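# A self-contained sketch of the per-key "update, and create on miss" pattern
# that QuotaSetsController.update() above applies via db.quota_update /
# db.quota_create. The in-memory store and ProjectQuotaNotFound below are
# illustrative stand-ins for nova.db and nova.exception.


class ProjectQuotaNotFound(Exception):
    pass


_quotas = {}  # {(project_id, resource): limit}


def quota_update(project_id, resource, limit):
    if (project_id, resource) not in _quotas:
        raise ProjectQuotaNotFound()
    _quotas[(project_id, resource)] = limit


def quota_create(project_id, resource, limit):
    _quotas[(project_id, resource)] = limit


def set_quota(project_id, resource, limit):
    # try the UPDATE first; fall back to CREATE when no row exists yet
    try:
        quota_update(project_id, resource, limit)
    except ProjectQuotaNotFound:
        quota_create(project_id, resource, limit)


set_quota('demo', 'instances', 20)   # first call creates the row
set_quota('demo', 'instances', 30)   # second call updates it
assert _quotas[('demo', 'instances')] == 30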
crossvul-python_data_bad_3633_5
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Quotas for instances, volumes, and floating ips.""" from nova import db from nova import exception from nova import flags FLAGS = flags.FLAGS flags.DEFINE_integer('quota_instances', 10, 'number of instances allowed per project') flags.DEFINE_integer('quota_cores', 20, 'number of instance cores allowed per project') flags.DEFINE_integer('quota_ram', 50 * 1024, 'megabytes of instance ram allowed per project') flags.DEFINE_integer('quota_volumes', 10, 'number of volumes allowed per project') flags.DEFINE_integer('quota_gigabytes', 1000, 'number of volume gigabytes allowed per project') flags.DEFINE_integer('quota_floating_ips', 10, 'number of floating ips allowed per project') flags.DEFINE_integer('quota_metadata_items', 128, 'number of metadata items allowed per instance') flags.DEFINE_integer('quota_max_injected_files', 5, 'number of injected files allowed') flags.DEFINE_integer('quota_max_injected_file_content_bytes', 10 * 1024, 'number of bytes allowed per injected file') flags.DEFINE_integer('quota_max_injected_file_path_bytes', 255, 'number of bytes allowed per injected file path') def _get_default_quotas(): defaults = { 'instances': FLAGS.quota_instances, 'cores': FLAGS.quota_cores, 'ram': FLAGS.quota_ram, 'volumes': FLAGS.quota_volumes, 'gigabytes': FLAGS.quota_gigabytes, 'floating_ips': FLAGS.quota_floating_ips, 'metadata_items': FLAGS.quota_metadata_items, 'injected_files': FLAGS.quota_max_injected_files, 'injected_file_content_bytes': FLAGS.quota_max_injected_file_content_bytes, } # -1 in the quota flags means unlimited for key in defaults.keys(): if defaults[key] == -1: defaults[key] = None return defaults def get_project_quotas(context, project_id): rval = _get_default_quotas() quota = db.quota_get_all_by_project(context, project_id) for key in rval.keys(): if key in quota: rval[key] = quota[key] return rval def _get_request_allotment(requested, used, quota): if quota is None: return requested return quota - used def allowed_instances(context, requested_instances, instance_type): """Check quota and return min(requested_instances, allowed_instances).""" project_id = context.project_id context = context.elevated() requested_cores = requested_instances * instance_type['vcpus'] requested_ram = requested_instances * instance_type['memory_mb'] usage = db.instance_data_get_for_project(context, project_id) used_instances, used_cores, used_ram = usage quota = get_project_quotas(context, project_id) allowed_instances = _get_request_allotment(requested_instances, used_instances, quota['instances']) allowed_cores = _get_request_allotment(requested_cores, used_cores, quota['cores']) allowed_ram = _get_request_allotment(requested_ram, used_ram, quota['ram']) allowed_instances = min(allowed_instances, allowed_cores // instance_type['vcpus'], allowed_ram // 
instance_type['memory_mb']) return min(requested_instances, allowed_instances) def allowed_volumes(context, requested_volumes, size): """Check quota and return min(requested_volumes, allowed_volumes).""" project_id = context.project_id context = context.elevated() size = int(size) requested_gigabytes = requested_volumes * size used_volumes, used_gigabytes = db.volume_data_get_for_project(context, project_id) quota = get_project_quotas(context, project_id) allowed_volumes = _get_request_allotment(requested_volumes, used_volumes, quota['volumes']) allowed_gigabytes = _get_request_allotment(requested_gigabytes, used_gigabytes, quota['gigabytes']) if size != 0: allowed_volumes = min(allowed_volumes, int(allowed_gigabytes // size)) return min(requested_volumes, allowed_volumes) def allowed_floating_ips(context, requested_floating_ips): """Check quota and return min(requested, allowed) floating ips.""" project_id = context.project_id context = context.elevated() used_floating_ips = db.floating_ip_count_by_project(context, project_id) quota = get_project_quotas(context, project_id) allowed_floating_ips = _get_request_allotment(requested_floating_ips, used_floating_ips, quota['floating_ips']) return min(requested_floating_ips, allowed_floating_ips) def _calculate_simple_quota(context, resource, requested): """Check quota for resource; return min(requested, allowed).""" quota = get_project_quotas(context, context.project_id) allowed = _get_request_allotment(requested, 0, quota[resource]) return min(requested, allowed) def allowed_metadata_items(context, requested_metadata_items): """Return the number of metadata items allowed.""" return _calculate_simple_quota(context, 'metadata_items', requested_metadata_items) def allowed_injected_files(context, requested_injected_files): """Return the number of injected files allowed.""" return _calculate_simple_quota(context, 'injected_files', requested_injected_files) def allowed_injected_file_content_bytes(context, requested_bytes): """Return the number of bytes allowed per injected file content.""" resource = 'injected_file_content_bytes' return _calculate_simple_quota(context, resource, requested_bytes) def allowed_injected_file_path_bytes(context): """Return the number of bytes allowed in an injected file path.""" return FLAGS.quota_max_injected_file_path_bytes class QuotaError(exception.ApiError): """Quota Exceeded.""" pass
./CrossVul/dataset_final_sorted/CWE-264/py/bad_3633_5
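# A self-contained sketch of the headroom arithmetic behind
# allowed_instances() above: each resource contributes an allotment
# (quota - used, or the full request when the quota is None, i.e.
# unlimited), and the grant is the minimum across instances, cores and
# RAM. The sample flavor and quota numbers are illustrative only.


def get_request_allotment(requested, used, quota):
    if quota is None:          # None means unlimited (-1 in the flags)
        return requested
    return quota - used


def allowed_instances(requested, vcpus, memory_mb, used, quotas):
    """`used` and `quotas` are dicts keyed by instances/cores/ram."""
    allowed = get_request_allotment(requested,
                                    used['instances'],
                                    quotas['instances'])
    allowed_cores = get_request_allotment(requested * vcpus,
                                          used['cores'],
                                          quotas['cores'])
    allowed_ram = get_request_allotment(requested * memory_mb,
                                        used['ram'],
                                        quotas['ram'])
    allowed = min(allowed,
                  allowed_cores // vcpus,
                  allowed_ram // memory_mb)
    return min(requested, allowed)


# 10 instances requested with a 2-vCPU/2048MB flavor against a 20-core
# quota of which 16 cores are already used: only 2 more instances fit.
grant = allowed_instances(10, 2, 2048,
                          {'instances': 3, 'cores': 16, 'ram': 8192},
                          {'instances': 10, 'cores': 20, 'ram': 51200})
assert grant == 2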
crossvul-python_data_bad_3697_2
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # # Copyright 2011, Piston Cloud Computing, Inc. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Utility methods to resize, repartition, and modify disk images. Includes injection of SSH PGP keys into authorized_keys file. """ import crypt import os import random import tempfile from nova import exception from nova import flags from nova.openstack.common import cfg from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova import utils from nova.virt.disk import guestfs from nova.virt.disk import loop from nova.virt.disk import nbd from nova.virt import images LOG = logging.getLogger(__name__) disk_opts = [ cfg.StrOpt('injected_network_template', default='$pybasedir/nova/virt/interfaces.template', help='Template file for injected network'), cfg.ListOpt('img_handlers', default=['loop', 'nbd', 'guestfs'], help='Order of methods used to mount disk images'), # NOTE(yamahata): ListOpt won't work because the command may include a # comma. For example: # # mkfs.ext3 -O dir_index,extent -E stride=8,stripe-width=16 # --label %(fs_label)s %(target)s # # list arguments are comma separated and there is no way to # escape such commas. # cfg.MultiStrOpt('virt_mkfs', default=[ 'default=mkfs.ext3 -L %(fs_label)s -F %(target)s', 'linux=mkfs.ext3 -L %(fs_label)s -F %(target)s', 'windows=mkfs.ntfs' ' --force --fast --label %(fs_label)s %(target)s', # NOTE(yamahata): vfat case #'windows=mkfs.vfat -n %(fs_label)s %(target)s', ], help='mkfs commands for ephemeral device. ' 'The format is <os_type>=<mkfs command>'), ] FLAGS = flags.FLAGS FLAGS.register_opts(disk_opts) _MKFS_COMMAND = {} _DEFAULT_MKFS_COMMAND = None for s in FLAGS.virt_mkfs: # NOTE(yamahata): mkfs command may includes '=' for its options. # So item.partition('=') doesn't work here os_type, mkfs_command = s.split('=', 1) if os_type: _MKFS_COMMAND[os_type] = mkfs_command if os_type == 'default': _DEFAULT_MKFS_COMMAND = mkfs_command def mkfs(os_type, fs_label, target): mkfs_command = (_MKFS_COMMAND.get(os_type, _DEFAULT_MKFS_COMMAND) or '') % locals() if mkfs_command: utils.execute(*mkfs_command.split()) def resize2fs(image, check_exit_code=False): utils.execute('e2fsck', '-fp', image, check_exit_code=check_exit_code) utils.execute('resize2fs', image, check_exit_code=check_exit_code) def get_disk_size(path): """Get the (virtual) size of a disk image :param path: Path to the disk image :returns: Size (in bytes) of the given disk image as it would be seen by a virtual machine. 
""" size = images.qemu_img_info(path)['virtual size'] size = size.split('(')[1].split()[0] return int(size) def extend(image, size): """Increase image to size""" virt_size = get_disk_size(image) if virt_size >= size: return utils.execute('qemu-img', 'resize', image, size) # NOTE(vish): attempts to resize filesystem resize2fs(image) def can_resize_fs(image, size, use_cow=False): """Check whether we can resize contained file system.""" # Check that we're increasing the size virt_size = get_disk_size(image) if virt_size >= size: return False # Check the image is unpartitioned if use_cow: # Try to mount an unpartitioned qcow2 image try: inject_data(image, use_cow=True) except exception.NovaException: return False else: # For raw, we can directly inspect the file system try: utils.execute('e2label', image) except exception.ProcessExecutionError: return False return True def bind(src, target, instance_name): """Bind device to a filesytem""" if src: utils.execute('touch', target, run_as_root=True) utils.execute('mount', '-o', 'bind', src, target, run_as_root=True) s = os.stat(src) cgroup_info = "b %s:%s rwm\n" % (os.major(s.st_rdev), os.minor(s.st_rdev)) cgroups_path = ("/sys/fs/cgroup/devices/libvirt/lxc/" "%s/devices.allow" % instance_name) utils.execute('tee', cgroups_path, process_input=cgroup_info, run_as_root=True) def unbind(target): if target: utils.execute('umount', target, run_as_root=True) class _DiskImage(object): """Provide operations on a disk image file.""" tmp_prefix = 'openstack-disk-mount-tmp' def __init__(self, image, partition=None, use_cow=False, mount_dir=None): # These passed to each mounter self.image = image self.partition = partition self.mount_dir = mount_dir # Internal self._mkdir = False self._mounter = None self._errors = [] # As a performance tweak, don't bother trying to # directly loopback mount a cow image. self.handlers = FLAGS.img_handlers[:] if use_cow and 'loop' in self.handlers: self.handlers.remove('loop') if not self.handlers: msg = _('no capable image handler configured') raise exception.NovaException(msg) if mount_dir: # Note the os.path.ismount() shortcut doesn't # work with libguestfs due to permissions issues. device = self._device_for_path(mount_dir) if device: self._reset(device) @staticmethod def _device_for_path(path): device = None with open("/proc/mounts", 'r') as ifp: for line in ifp: fields = line.split() if fields[1] == path: device = fields[0] break return device def _reset(self, device): """Reset internal state for a previously mounted directory.""" mounter_cls = self._handler_class(device=device) mounter = mounter_cls(image=self.image, partition=self.partition, mount_dir=self.mount_dir, device=device) self._mounter = mounter mount_name = os.path.basename(self.mount_dir or '') self._mkdir = mount_name.startswith(self.tmp_prefix) @property def errors(self): """Return the collated errors from all operations.""" return '\n--\n'.join([''] + self._errors) @staticmethod def _handler_class(mode=None, device=None): """Look up the appropriate class to use based on MODE or DEVICE.""" for cls in (loop.Mount, nbd.Mount, guestfs.Mount): if mode and cls.mode == mode: return cls elif device and cls.device_id_string in device: return cls msg = _("no disk image handler for: %s") % mode or device raise exception.NovaException(msg) def mount(self): """Mount a disk image, using the object attributes. The first supported means provided by the mount classes is used. True, or False is returned and the 'errors' attribute contains any diagnostics. 
""" if self._mounter: raise exception.NovaException(_('image already mounted')) if not self.mount_dir: self.mount_dir = tempfile.mkdtemp(prefix=self.tmp_prefix) self._mkdir = True try: for h in self.handlers: mounter_cls = self._handler_class(h) mounter = mounter_cls(image=self.image, partition=self.partition, mount_dir=self.mount_dir) if mounter.do_mount(): self._mounter = mounter break else: LOG.debug(mounter.error) self._errors.append(mounter.error) finally: if not self._mounter: self.umount() # rmdir return bool(self._mounter) def umount(self): """Unmount a disk image from the file system.""" try: if self._mounter: self._mounter.do_umount() finally: if self._mkdir: os.rmdir(self.mount_dir) # Public module functions def inject_data(image, key=None, net=None, metadata=None, admin_password=None, files=None, partition=None, use_cow=False): """Injects a ssh key and optionally net data into a disk image. it will mount the image as a fully partitioned disk and attempt to inject into the specified partition number. If partition is not specified it mounts the image as a single partition. """ img = _DiskImage(image=image, partition=partition, use_cow=use_cow) if img.mount(): try: inject_data_into_fs(img.mount_dir, key, net, metadata, admin_password, files) finally: img.umount() else: raise exception.NovaException(img.errors) def setup_container(image, container_dir, use_cow=False): """Setup the LXC container. It will mount the loopback image to the container directory in order to create the root filesystem for the container. """ img = _DiskImage(image=image, use_cow=use_cow, mount_dir=container_dir) if not img.mount(): LOG.error(_("Failed to mount container filesystem '%(image)s' " "on '%(target)s': %(errors)s") % {"image": img, "target": container_dir, "errors": img.errors}) raise exception.NovaException(img.errors) def destroy_container(container_dir): """Destroy the container once it terminates. It will umount the container that is mounted, and delete any linked devices. """ try: img = _DiskImage(image=None, mount_dir=container_dir) img.umount() except Exception, exn: LOG.exception(_('Failed to unmount container filesystem: %s'), exn) def inject_data_into_fs(fs, key, net, metadata, admin_password, files): """Injects data into a filesystem already mounted by the caller. Virt connections can call this directly if they mount their fs in a different way to inject_data """ if key: _inject_key_into_fs(key, fs) if net: _inject_net_into_fs(net, fs) if metadata: _inject_metadata_into_fs(metadata, fs) if admin_password: _inject_admin_password_into_fs(admin_password, fs) if files: for (path, contents) in files: _inject_file_into_fs(fs, path, contents) def _join_and_check_path_within_fs(fs, *args): '''os.path.join() with safety check for injected file paths. Join the supplied path components and make sure that the resulting path we are injecting into is within the mounted guest fs. Trying to be clever and specifying a path with '..' in it will hit this safeguard. 
''' absolute_path = os.path.realpath(os.path.join(fs, *args)) if not absolute_path.startswith(os.path.realpath(fs) + '/'): raise exception.Invalid(_('injected file path not valid')) return absolute_path def _inject_file_into_fs(fs, path, contents, append=False): absolute_path = _join_and_check_path_within_fs(fs, path.lstrip('/')) parent_dir = os.path.dirname(absolute_path) utils.execute('mkdir', '-p', parent_dir, run_as_root=True) args = [] if append: args.append('-a') args.append(absolute_path) kwargs = dict(process_input=contents, run_as_root=True) utils.execute('tee', *args, **kwargs) def _inject_metadata_into_fs(metadata, fs): metadata = dict([(m.key, m.value) for m in metadata]) _inject_file_into_fs(fs, 'meta.js', jsonutils.dumps(metadata)) def _setup_selinux_for_keys(fs): """Get selinux guests to ensure correct context on injected keys.""" se_cfg = _join_and_check_path_within_fs(fs, 'etc', 'selinux') se_cfg, _err = utils.trycmd('readlink', '-e', se_cfg, run_as_root=True) if not se_cfg: return rclocal = _join_and_check_path_within_fs(fs, 'etc', 'rc.local') # Support systemd based systems rc_d = _join_and_check_path_within_fs(fs, 'etc', 'rc.d') rclocal_e, _err = utils.trycmd('readlink', '-e', rclocal, run_as_root=True) rc_d_e, _err = utils.trycmd('readlink', '-e', rc_d, run_as_root=True) if not rclocal_e and rc_d_e: rclocal = os.path.join(rc_d, 'rc.local') # Note some systems end rc.local with "exit 0" # and so to append there you'd need something like: # utils.execute('sed', '-i', '${/^exit 0$/d}' rclocal, run_as_root=True) restorecon = [ '#!/bin/sh\n', '# Added by Nova to ensure injected ssh keys have the right context\n', 'restorecon -RF /root/.ssh/ 2>/dev/null || :\n', ] rclocal_rel = os.path.relpath(rclocal, fs) _inject_file_into_fs(fs, rclocal_rel, ''.join(restorecon), append=True) utils.execute('chmod', 'a+x', rclocal, run_as_root=True) def _inject_key_into_fs(key, fs): """Add the given public ssh key to root's authorized_keys. key is an ssh key string. fs is the path to the base of the filesystem into which to inject the key. """ sshdir = _join_and_check_path_within_fs(fs, 'root', '.ssh') utils.execute('mkdir', '-p', sshdir, run_as_root=True) utils.execute('chown', 'root', sshdir, run_as_root=True) utils.execute('chmod', '700', sshdir, run_as_root=True) keyfile = os.path.join('root', '.ssh', 'authorized_keys') key_data = ''.join([ '\n', '# The following ssh key was injected by Nova', '\n', key.strip(), '\n', ]) _inject_file_into_fs(fs, keyfile, key_data, append=True) _setup_selinux_for_keys(fs) def _inject_net_into_fs(net, fs): """Inject /etc/network/interfaces into the filesystem rooted at fs. net is the contents of /etc/network/interfaces. """ netdir = _join_and_check_path_within_fs(fs, 'etc', 'network') utils.execute('mkdir', '-p', netdir, run_as_root=True) utils.execute('chown', 'root:root', netdir, run_as_root=True) utils.execute('chmod', 755, netdir, run_as_root=True) netfile = os.path.join('etc', 'network', 'interfaces') _inject_file_into_fs(fs, netfile, net) def _inject_admin_password_into_fs(admin_passwd, fs): """Set the root password to admin_passwd admin_password is a root password fs is the path to the base of the filesystem into which to inject the key. This method modifies the instance filesystem directly, and does not require a guest agent running in the instance. """ # The approach used here is to copy the password and shadow # files from the instance filesystem to local files, make any # necessary changes, and then copy them back. 
    admin_user = 'root'

    fd, tmp_passwd = tempfile.mkstemp()
    os.close(fd)
    fd, tmp_shadow = tempfile.mkstemp()
    os.close(fd)

    passwd_path = _join_and_check_path_within_fs(fs, 'etc', 'passwd')
    shadow_path = _join_and_check_path_within_fs(fs, 'etc', 'shadow')

    utils.execute('cp', passwd_path, tmp_passwd, run_as_root=True)
    utils.execute('cp', shadow_path, tmp_shadow, run_as_root=True)
    _set_passwd(admin_user, admin_passwd, tmp_passwd, tmp_shadow)
    utils.execute('cp', tmp_passwd, passwd_path, run_as_root=True)
    os.unlink(tmp_passwd)
    utils.execute('cp', tmp_shadow, shadow_path, run_as_root=True)
    os.unlink(tmp_shadow)


def _set_passwd(username, admin_passwd, passwd_file, shadow_file):
    """Set the password for username to admin_passwd.

    The passwd_file is not modified.  The shadow_file is updated.
    If the username is not found in either file, an exception is raised.

    :param username: the username
    :param admin_passwd: the plaintext password to encrypt and set
    :param passwd_file: path to the passwd file
    :param shadow_file: path to the shadow password file
    :returns: nothing
    :raises: exception.NovaException(), IOError()

    """
    salt_set = ('abcdefghijklmnopqrstuvwxyz'
                'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                '0123456789./')
    # encryption algo - id pairs for crypt()
    algos = {'SHA-512': '$6$', 'SHA-256': '$5$', 'MD5': '$1$', 'DES': ''}

    salt = ''.join([random.choice(salt_set) for _ in range(16)])

    # crypt() depends on the underlying libc, and may not support all
    # forms of hash. We try md5 first. If we get only 13 characters back,
    # then the underlying crypt() didn't understand the '$n$salt' magic,
    # so we fall back to DES.
    # md5 is the default because it's widely supported. Although the
    # local crypt() might support stronger SHA, the target instance
    # might not.
    encrypted_passwd = crypt.crypt(admin_passwd, algos['MD5'] + salt)
    if len(encrypted_passwd) == 13:
        encrypted_passwd = crypt.crypt(admin_passwd, algos['DES'] + salt)

    p_file = None
    s_file = None
    try:
        p_file = open(passwd_file, 'rb')
        s_file = open(shadow_file, 'rb')

        # username MUST exist in the passwd file or it's an error
        found = False
        for entry in p_file:
            split_entry = entry.split(':')
            if split_entry[0] == username:
                found = True
                break
        if not found:
            msg = _('User %(username)s not found in password file.')
            # the template uses a named placeholder, so interpolate a dict
            raise exception.NovaException(msg % {'username': username})

        # Update the password in the shadow file. It's an error if the
        # user doesn't exist.
        new_shadow = list()
        found = False
        for entry in s_file:
            split_entry = entry.split(':')
            if split_entry[0] == username:
                split_entry[1] = encrypted_passwd
                found = True
            new_entry = ':'.join(split_entry)
            new_shadow.append(new_entry)
        s_file.close()
        if not found:
            msg = _('User %(username)s not found in shadow file.')
            raise exception.NovaException(msg % {'username': username})
        s_file = open(shadow_file, 'wb')
        for entry in new_shadow:
            s_file.write(entry)
    finally:
        # guard the closes so a failed open() doesn't raise NameError here
        if p_file:
            p_file.close()
        if s_file:
            s_file.close()
./CrossVul/dataset_final_sorted/CWE-264/py/bad_3697_2
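# A standalone restatement of the containment check that
# _join_and_check_path_within_fs() above relies on: resolve the joined path
# with realpath() (which also defuses symlinks planted inside the guest
# image) and require the result to stay under the mounted root. ValueError
# stands in for nova's exception.Invalid; the paths are illustrative.

import os


def join_and_check_within_fs(root, *args):
    absolute_path = os.path.realpath(os.path.join(root, *args))
    if not absolute_path.startswith(os.path.realpath(root) + '/'):
        raise ValueError('injected file path not valid')
    return absolute_path


root = '/mnt/guest-fs'
join_and_check_within_fs(root, 'etc', 'passwd')            # stays inside
try:
    join_and_check_within_fs(root, '..', 'etc', 'shadow')  # escapes the root
except ValueError:
    pass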
crossvul-python_data_good_5538_1
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Nova base exception handling. Includes decorator for re-raising Nova-type exceptions. SHOULD include dedicated exception logging. """ import functools import sys import novaclient.exceptions import webob.exc from nova import log as logging LOG = logging.getLogger(__name__) class ConvertedException(webob.exc.WSGIHTTPException): def __init__(self, code=0, title="", explanation=""): self.code = code self.title = title self.explanation = explanation super(ConvertedException, self).__init__() class ProcessExecutionError(IOError): def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None, description=None): self.exit_code = exit_code self.stderr = stderr self.stdout = stdout self.cmd = cmd self.description = description if description is None: description = _('Unexpected error while running command.') if exit_code is None: exit_code = '-' message = _('%(description)s\nCommand: %(cmd)s\n' 'Exit code: %(exit_code)s\nStdout: %(stdout)r\n' 'Stderr: %(stderr)r') % locals() IOError.__init__(self, message) class Error(Exception): pass class EC2APIError(Error): def __init__(self, message='Unknown', code=None): self.msg = message self.code = code if code: outstr = '%s: %s' % (code, message) else: outstr = '%s' % message super(EC2APIError, self).__init__(outstr) class DBError(Error): """Wraps an implementation specific exception.""" def __init__(self, inner_exception=None): self.inner_exception = inner_exception super(DBError, self).__init__(str(inner_exception)) def wrap_db_error(f): def _wrap(*args, **kwargs): try: return f(*args, **kwargs) except UnicodeEncodeError: raise InvalidUnicodeParameter() except Exception, e: LOG.exception(_('DB exception wrapped.')) raise DBError(e) _wrap.func_name = f.func_name return _wrap def wrap_exception(notifier=None, publisher_id=None, event_type=None, level=None): """This decorator wraps a method to catch any exceptions that may get thrown. It logs the exception as well as optionally sending it to the notification system. """ # TODO(sandy): Find a way to import nova.notifier.api so we don't have # to pass it in as a parameter. Otherwise we get a cyclic import of # nova.notifier.api -> nova.utils -> nova.exception :( # TODO(johannes): Also, it would be nice to use # utils.save_and_reraise_exception() without an import loop def inner(f): def wrapped(*args, **kw): try: return f(*args, **kw) except Exception, e: # Save exception since it can be clobbered during processing # below before we can re-raise exc_info = sys.exc_info() if notifier: payload = dict(args=args, exception=e) payload.update(kw) # Use a temp vars so we don't shadow # our outer definitions. 
temp_level = level if not temp_level: temp_level = notifier.ERROR temp_type = event_type if not temp_type: # If f has multiple decorators, they must use # functools.wraps to ensure the name is # propagated. temp_type = f.__name__ notifier.notify(publisher_id, temp_type, temp_level, payload) # re-raise original exception since it may have been clobbered raise exc_info[0], exc_info[1], exc_info[2] return functools.wraps(f)(wrapped) return inner class NovaException(Exception): """Base Nova Exception To correctly use this class, inherit from it and define a 'message' property. That message will get printf'd with the keyword arguments provided to the constructor. """ message = _("An unknown exception occurred.") code = 500 headers = {} safe = False def __init__(self, message=None, **kwargs): self.kwargs = kwargs if 'code' not in self.kwargs: try: self.kwargs['code'] = self.code except AttributeError: pass if not message: try: message = self.message % kwargs except Exception as e: # at least get the core message out if something happened message = self.message super(NovaException, self).__init__(message) class DecryptionFailure(NovaException): message = _("Failed to decrypt text") class ImagePaginationFailed(NovaException): message = _("Failed to paginate through images from image service") class VirtualInterfaceCreateException(NovaException): message = _("Virtual Interface creation failed") class VirtualInterfaceMacAddressException(NovaException): message = _("5 attempts to create virtual interface" "with unique mac address failed") class GlanceConnectionFailed(NovaException): message = _("Connection to glance failed") + ": %(reason)s" class MelangeConnectionFailed(NovaException): message = _("Connection to melange failed") + ": %(reason)s" class NotAuthorized(NovaException): message = _("Not authorized.") code = 403 class AdminRequired(NotAuthorized): message = _("User does not have admin privileges") class PolicyNotAuthorized(NotAuthorized): message = _("Policy doesn't allow %(action)s to be performed.") class ImageNotAuthorized(NovaException): message = _("Not authorized for image %(image_id)s.") class Invalid(NovaException): message = _("Unacceptable parameters.") code = 400 class InvalidSnapshot(Invalid): message = _("Invalid snapshot") + ": %(reason)s" class VolumeUnattached(Invalid): message = _("Volume %(volume_id)s is not attached to anything") class InvalidKeypair(Invalid): message = _("Keypair data is invalid") class SfJsonEncodeFailure(NovaException): message = _("Failed to load data into json format") class InvalidRequest(Invalid): message = _("The request is invalid.") class InvalidSignature(Invalid): message = _("Invalid signature %(signature)s for user %(user)s.") class InvalidInput(Invalid): message = _("Invalid input received") + ": %(reason)s" class InvalidInstanceType(Invalid): message = _("Invalid instance type %(instance_type)s.") class InvalidVolumeType(Invalid): message = _("Invalid volume type") + ": %(reason)s" class InvalidVolume(Invalid): message = _("Invalid volume") + ": %(reason)s" class InvalidPortRange(Invalid): message = _("Invalid port range %(from_port)s:%(to_port)s. 
%(msg)s") class InvalidIpProtocol(Invalid): message = _("Invalid IP protocol %(protocol)s.") class InvalidContentType(Invalid): message = _("Invalid content type %(content_type)s.") class InvalidCidr(Invalid): message = _("Invalid cidr %(cidr)s.") class InvalidRPCConnectionReuse(Invalid): message = _("Invalid reuse of an RPC connection.") class InvalidUnicodeParameter(Invalid): message = _("Invalid Parameter: " "Unicode is not supported by the current database.") # Cannot be templated as the error syntax varies. # msg needs to be constructed when raised. class InvalidParameterValue(Invalid): message = _("%(err)s") class InvalidAggregateAction(Invalid): message = _("Cannot perform action '%(action)s' on aggregate " "%(aggregate_id)s. Reason: %(reason)s.") class InvalidGroup(Invalid): message = _("Group not valid. Reason: %(reason)s") class InstanceInvalidState(Invalid): message = _("Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot " "%(method)s while the instance is in this state.") class InvalidBDM(Invalid): message = _("Block Device Mapping is Invalid.") class InvalidBDMSnapshot(InvalidBDM): message = _("Block Device Mapping is Invalid: " "failed to get snapshot %(id)s.") class InvalidBDMVolume(InvalidBDM): message = _("Block Device Mapping is Invalid: " "failed to get volume %(id)s.") class InstanceNotRunning(Invalid): message = _("Instance %(instance_id)s is not running.") class InstanceNotSuspended(Invalid): message = _("Instance %(instance_id)s is not suspended.") class InstanceNotInRescueMode(Invalid): message = _("Instance %(instance_id)s is not in rescue mode") class InstanceSuspendFailure(Invalid): message = _("Failed to suspend instance") + ": %(reason)s" class InstanceResumeFailure(Invalid): message = _("Failed to resume server") + ": %(reason)s." class InstanceRebootFailure(Invalid): message = _("Failed to reboot instance") + ": %(reason)s" class InstanceTerminationFailure(Invalid): message = _("Failed to terminate instance") + ": %(reason)s" class ServiceUnavailable(Invalid): message = _("Service is unavailable at this time.") class VolumeServiceUnavailable(ServiceUnavailable): message = _("Volume service is unavailable at this time.") class ComputeServiceUnavailable(ServiceUnavailable): message = _("Compute service is unavailable at this time.") class UnableToMigrateToSelf(Invalid): message = _("Unable to migrate instance (%(instance_id)s) " "to current host (%(host)s).") class DestinationHostUnavailable(Invalid): message = _("Destination compute host is unavailable at this time.") class SourceHostUnavailable(Invalid): message = _("Original compute host is unavailable at this time.") class InvalidHypervisorType(Invalid): message = _("The supplied hypervisor type of is invalid.") class DestinationHypervisorTooOld(Invalid): message = _("The instance requires a newer hypervisor version than " "has been provided.") class DestinationDiskExists(Invalid): message = _("The supplied disk path (%(path)s) already exists, " "it is expected not to exist.") class InvalidDevicePath(Invalid): message = _("The supplied device path (%(path)s) is invalid.") class DeviceIsBusy(Invalid): message = _("The supplied device (%(device)s) is busy.") class InvalidCPUInfo(Invalid): message = _("Unacceptable CPU info") + ": %(reason)s" class InvalidIpAddressError(Invalid): message = _("%(address)s is not a valid IP v4/6 address.") class InvalidVLANTag(Invalid): message = _("VLAN tag is not appropriate for the port group " "%(bridge)s. 
Expected VLAN tag is %(tag)s, " "but the one associated with the port group is %(pgroup)s.") class InvalidVLANPortGroup(Invalid): message = _("vSwitch which contains the port group %(bridge)s is " "not associated with the desired physical adapter. " "Expected vSwitch is %(expected)s, but the one associated " "is %(actual)s.") class InvalidDiskFormat(Invalid): message = _("Disk format %(disk_format)s is not acceptable") class ImageUnacceptable(Invalid): message = _("Image %(image_id)s is unacceptable: %(reason)s") class InstanceUnacceptable(Invalid): message = _("Instance %(instance_id)s is unacceptable: %(reason)s") class InvalidEc2Id(Invalid): message = _("Ec2 id %(ec2_id)s is unacceptable.") class NotFound(NovaException): message = _("Resource could not be found.") code = 404 class FlagNotSet(NotFound): message = _("Required flag %(flag)s not set.") class VolumeNotFound(NotFound): message = _("Volume %(volume_id)s could not be found.") class SfAccountNotFound(NotFound): message = _("Unable to locate account %(account_name)s on " "Solidfire device") class VolumeNotFoundForInstance(VolumeNotFound): message = _("Volume not found for instance %(instance_id)s.") class VolumeMetadataNotFound(NotFound): message = _("Volume %(volume_id)s has no metadata with " "key %(metadata_key)s.") class NoVolumeTypesFound(NotFound): message = _("Zero volume types found.") class VolumeTypeNotFound(NotFound): message = _("Volume type %(volume_type_id)s could not be found.") class VolumeTypeNotFoundByName(VolumeTypeNotFound): message = _("Volume type with name %(volume_type_name)s " "could not be found.") class VolumeTypeExtraSpecsNotFound(NotFound): message = _("Volume Type %(volume_type_id)s has no extra specs with " "key %(extra_specs_key)s.") class SnapshotNotFound(NotFound): message = _("Snapshot %(snapshot_id)s could not be found.") class VolumeIsBusy(NovaException): message = _("deleting volume %(volume_name)s that has snapshot") class SnapshotIsBusy(NovaException): message = _("deleting snapshot %(snapshot_name)s that has " "dependent volumes") class ISCSITargetNotFoundForVolume(NotFound): message = _("No target id found for volume %(volume_id)s.") class DiskNotFound(NotFound): message = _("No disk at %(location)s") class VolumeDriverNotFound(NotFound): message = _("Could not find a handler for %(driver_type)s volume.") class InvalidImageRef(Invalid): message = _("Invalid image href %(image_href)s.") class ListingImageRefsNotSupported(Invalid): message = _("Some images have been stored via hrefs." 
+ " This version of the api does not support displaying image hrefs.") class ImageNotFound(NotFound): message = _("Image %(image_id)s could not be found.") class KernelNotFoundForImage(ImageNotFound): message = _("Kernel not found for image %(image_id)s.") class UserNotFound(NotFound): message = _("User %(user_id)s could not be found.") class ProjectNotFound(NotFound): message = _("Project %(project_id)s could not be found.") class ProjectMembershipNotFound(NotFound): message = _("User %(user_id)s is not a member of project %(project_id)s.") class UserRoleNotFound(NotFound): message = _("Role %(role_id)s could not be found.") class StorageRepositoryNotFound(NotFound): message = _("Cannot find SR to read/write VDI.") class NetworkInUse(NovaException): message = _("Network %(network_id)s is still in use.") class NetworkNotCreated(NovaException): message = _("%(req)s is required to create a network.") class NetworkNotFound(NotFound): message = _("Network %(network_id)s could not be found.") class NetworkNotFoundForBridge(NetworkNotFound): message = _("Network could not be found for bridge %(bridge)s") class NetworkNotFoundForUUID(NetworkNotFound): message = _("Network could not be found for uuid %(uuid)s") class NetworkNotFoundForCidr(NetworkNotFound): message = _("Network could not be found with cidr %(cidr)s.") class NetworkNotFoundForInstance(NetworkNotFound): message = _("Network could not be found for instance %(instance_id)s.") class NoNetworksFound(NotFound): message = _("No networks defined.") class NetworkNotFoundForProject(NotFound): message = _("Either Network uuid %(network_uuid)s is not present or " "is not assigned to the project %(project_id)s.") class NetworkHostNotSet(NovaException): message = _("Host is not set to the network (%(network_id)s).") class NetworkBusy(NovaException): message = _("Network %(network)s has active ports, cannot delete.") class DatastoreNotFound(NotFound): message = _("Could not find the datastore reference(s) which the VM uses.") class FixedIpNotFound(NotFound): message = _("No fixed IP associated with id %(id)s.") class FixedIpNotFoundForAddress(FixedIpNotFound): message = _("Fixed ip not found for address %(address)s.") class FixedIpNotFoundForInstance(FixedIpNotFound): message = _("Instance %(instance_id)s has zero fixed ips.") class FixedIpNotFoundForNetworkHost(FixedIpNotFound): message = _("Network host %(host)s has zero fixed ips " "in network %(network_id)s.") class FixedIpNotFoundForSpecificInstance(FixedIpNotFound): message = _("Instance %(instance_id)s doesn't have fixed ip '%(ip)s'.") class FixedIpNotFoundForHost(FixedIpNotFound): message = _("Host %(host)s has zero fixed ips.") class FixedIpNotFoundForNetwork(FixedIpNotFound): message = _("Fixed IP address (%(address)s) does not exist in " "network (%(network_uuid)s).") class FixedIpAlreadyInUse(NovaException): message = _("Fixed IP address %(address)s is already in use.") class FixedIpInvalid(Invalid): message = _("Fixed IP address %(address)s is invalid.") class NoMoreFixedIps(NovaException): message = _("Zero fixed ips available.") class NoFixedIpsDefined(NotFound): message = _("Zero fixed ips could be found.") class FloatingIpNotFound(NotFound): message = _("Floating ip not found for id %(id)s.") class FloatingIpDNSExists(Invalid): message = _("The DNS entry %(name)s already exists in domain %(domain)s.") class FloatingIpNotFoundForAddress(FloatingIpNotFound): message = _("Floating ip not found for address %(address)s.") class FloatingIpNotFoundForHost(FloatingIpNotFound): message = 
_("Floating ip not found for host %(host)s.") class NoMoreFloatingIps(FloatingIpNotFound): message = _("Zero floating ips available.") class FloatingIpAssociated(NovaException): message = _("Floating ip %(address)s is associated.") class FloatingIpNotAssociated(NovaException): message = _("Floating ip %(address)s is not associated.") class NoFloatingIpsDefined(NotFound): message = _("Zero floating ips exist.") class NoFloatingIpInterface(NotFound): message = _("Interface %(interface)s not found.") class KeypairNotFound(NotFound): message = _("Keypair %(name)s not found for user %(user_id)s") class CertificateNotFound(NotFound): message = _("Certificate %(certificate_id)s not found.") class ServiceNotFound(NotFound): message = _("Service %(service_id)s could not be found.") class HostNotFound(NotFound): message = _("Host %(host)s could not be found.") class ComputeHostNotFound(HostNotFound): message = _("Compute host %(host)s could not be found.") class HostBinaryNotFound(NotFound): message = _("Could not find binary %(binary)s on host %(host)s.") class AuthTokenNotFound(NotFound): message = _("Auth token %(token)s could not be found.") class AccessKeyNotFound(NotFound): message = _("Access Key %(access_key)s could not be found.") class QuotaNotFound(NotFound): message = _("Quota could not be found") class ProjectQuotaNotFound(QuotaNotFound): message = _("Quota for project %(project_id)s could not be found.") class SecurityGroupNotFound(NotFound): message = _("Security group %(security_group_id)s not found.") class SecurityGroupNotFoundForProject(SecurityGroupNotFound): message = _("Security group %(security_group_id)s not found " "for project %(project_id)s.") class SecurityGroupNotFoundForRule(SecurityGroupNotFound): message = _("Security group with rule %(rule_id)s not found.") class SecurityGroupExistsForInstance(Invalid): message = _("Security group %(security_group_id)s is already associated" " with the instance %(instance_id)s") class SecurityGroupNotExistsForInstance(Invalid): message = _("Security group %(security_group_id)s is not associated with" " the instance %(instance_id)s") class MigrationNotFound(NotFound): message = _("Migration %(migration_id)s could not be found.") class MigrationNotFoundByStatus(MigrationNotFound): message = _("Migration not found for instance %(instance_id)s " "with status %(status)s.") class ConsolePoolNotFound(NotFound): message = _("Console pool %(pool_id)s could not be found.") class ConsolePoolNotFoundForHostType(NotFound): message = _("Console pool of type %(console_type)s " "for compute host %(compute_host)s " "on proxy host %(host)s not found.") class ConsoleNotFound(NotFound): message = _("Console %(console_id)s could not be found.") class ConsoleNotFoundForInstance(ConsoleNotFound): message = _("Console for instance %(instance_id)s could not be found.") class ConsoleNotFoundInPoolForInstance(ConsoleNotFound): message = _("Console for instance %(instance_id)s " "in pool %(pool_id)s could not be found.") class ConsoleTypeInvalid(Invalid): message = _("Invalid console type %(console_type)s ") class NoInstanceTypesFound(NotFound): message = _("Zero instance types found.") class InstanceTypeNotFound(NotFound): message = _("Instance type %(instance_type_id)s could not be found.") class InstanceTypeNotFoundByName(InstanceTypeNotFound): message = _("Instance type with name %(instance_type_name)s " "could not be found.") class FlavorNotFound(NotFound): message = _("Flavor %(flavor_id)s could not be found.") class CellNotFound(NotFound): message = 
_("Cell %(cell_id)s could not be found.") class SchedulerHostFilterNotFound(NotFound): message = _("Scheduler Host Filter %(filter_name)s could not be found.") class SchedulerCostFunctionNotFound(NotFound): message = _("Scheduler cost function %(cost_fn_str)s could" " not be found.") class SchedulerWeightFlagNotFound(NotFound): message = _("Scheduler weight flag not found: %(flag_name)s") class InstanceMetadataNotFound(NotFound): message = _("Instance %(instance_id)s has no metadata with " "key %(metadata_key)s.") class InstanceTypeExtraSpecsNotFound(NotFound): message = _("Instance Type %(instance_type_id)s has no extra specs with " "key %(extra_specs_key)s.") class LDAPObjectNotFound(NotFound): message = _("LDAP object could not be found") class LDAPUserNotFound(LDAPObjectNotFound): message = _("LDAP user %(user_id)s could not be found.") class LDAPGroupNotFound(LDAPObjectNotFound): message = _("LDAP group %(group_id)s could not be found.") class LDAPGroupMembershipNotFound(NotFound): message = _("LDAP user %(user_id)s is not a member of group %(group_id)s.") class FileNotFound(NotFound): message = _("File %(file_path)s could not be found.") class NoFilesFound(NotFound): message = _("Zero files could be found.") class SwitchNotFoundForNetworkAdapter(NotFound): message = _("Virtual switch associated with the " "network adapter %(adapter)s not found.") class NetworkAdapterNotFound(NotFound): message = _("Network adapter %(adapter)s could not be found.") class ClassNotFound(NotFound): message = _("Class %(class_name)s could not be found: %(exception)s") class NotAllowed(NovaException): message = _("Action not allowed.") class GlobalRoleNotAllowed(NotAllowed): message = _("Unable to use global role %(role_id)s") class ImageRotationNotAllowed(NovaException): message = _("Rotation is not allowed for snapshots") class RotationRequiredForBackup(NovaException): message = _("Rotation param is required for backup image_type") #TODO(bcwaldon): EOL this exception! 
class Duplicate(NovaException): pass class KeyPairExists(Duplicate): message = _("Key pair %(key_name)s already exists.") class UserExists(Duplicate): message = _("User %(user)s already exists.") class LDAPUserExists(UserExists): message = _("LDAP user %(user)s already exists.") class LDAPGroupExists(Duplicate): message = _("LDAP group %(group)s already exists.") class LDAPMembershipExists(Duplicate): message = _("User %(uid)s is already a member of " "the group %(group_dn)s") class ProjectExists(Duplicate): message = _("Project %(project)s already exists.") class InstanceExists(Duplicate): message = _("Instance %(name)s already exists.") class InstanceTypeExists(Duplicate): message = _("Instance Type %(name)s already exists.") class VolumeTypeExists(Duplicate): message = _("Volume Type %(name)s already exists.") class InvalidSharedStorage(NovaException): message = _("%(path)s is on shared storage: %(reason)s") class MigrationError(NovaException): message = _("Migration error") + ": %(reason)s" class MalformedRequestBody(NovaException): message = _("Malformed message body: %(reason)s") class ConfigNotFound(NotFound): message = _("Could not find config at %(path)s") class PasteAppNotFound(NotFound): message = _("Could not load paste app '%(name)s' from %(path)s") class CannotResizeToSameSize(NovaException): message = _("When resizing, instances must change size!") class ImageTooLarge(NovaException): message = _("Image is larger than instance type allows") class ZoneRequestError(NovaException): message = _("1 or more Zones could not complete the request") class InstanceTypeMemoryTooSmall(NovaException): message = _("Instance type's memory is too small for requested image.") class InstanceTypeDiskTooSmall(NovaException): message = _("Instance type's disk is too small for requested image.") class InsufficientFreeMemory(NovaException): message = _("Insufficient free memory on compute node to start %(uuid)s.") class CouldNotFetchMetrics(NovaException): message = _("Could not fetch bandwidth/cpu/disk metrics for this host.") class NoValidHost(NovaException): message = _("No valid host was found. 
%(reason)s") class WillNotSchedule(NovaException): message = _("Host %(host)s is not up or doesn't exist.") class QuotaError(NovaException): message = _("Quota exceeded") + ": code=%(code)s" code = 413 headers = {'Retry-After': 0} safe = True class AggregateError(NovaException): message = _("Aggregate %(aggregate_id)s: action '%(action)s' " "caused an error: %(reason)s.") class AggregateNotFound(NotFound): message = _("Aggregate %(aggregate_id)s could not be found.") class AggregateNameExists(Duplicate): message = _("Aggregate %(aggregate_name)s already exists.") class AggregateHostNotFound(NotFound): message = _("Aggregate %(aggregate_id)s has no host %(host)s.") class AggregateMetadataNotFound(NotFound): message = _("Aggregate %(aggregate_id)s has no metadata with " "key %(metadata_key)s.") class AggregateHostConflict(Duplicate): message = _("Host %(host)s already member of another aggregate.") class AggregateHostExists(Duplicate): message = _("Aggregate %(aggregate_id)s already has host %(host)s.") class DuplicateSfVolumeNames(Duplicate): message = _("Detected more than one volume with name %(vol_name)s") class VolumeTypeCreateFailed(NovaException): message = _("Cannot create volume_type with " "name %(name)s and specs %(extra_specs)s") class InstanceTypeCreateFailed(NovaException): message = _("Unable to create instance type") class SolidFireAPIException(NovaException): message = _("Bad response from SolidFire API") class SolidFireAPIStatusException(SolidFireAPIException): message = _("Error in SolidFire API response: status=%(status)s") class SolidFireAPIDataException(SolidFireAPIException): message = _("Error in SolidFire API response: data=%(data)s") class DuplicateVlan(Duplicate): message = _("Detected existing vlan with id %(vlan)d") class InstanceNotFound(NotFound): message = _("Instance %(instance_id)s could not be found.") class InvalidInstanceIDMalformed(Invalid): message = _("Invalid id: %(val)s (expecting \"i-...\").") class CouldNotFetchImage(NovaException): message = _("Could not fetch image %(image)s")
./CrossVul/dataset_final_sorted/CWE-264/py/good_5538_1
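# A minimal sketch of the printf-style message templating that NovaException
# implements above: subclasses declare a `message` template with %(name)s
# placeholders, constructor kwargs fill it in, and a formatting failure falls
# back to the raw template instead of masking the original error. BaseError
# and VolumeNotFound below are illustrative stand-ins, not nova's own classes.


class BaseError(Exception):
    message = 'An unknown exception occurred.'

    def __init__(self, message=None, **kwargs):
        if not message:
            try:
                message = self.message % kwargs
            except (KeyError, TypeError):
                # at least get the core message out
                message = self.message
        super(BaseError, self).__init__(message)


class VolumeNotFound(BaseError):
    message = 'Volume %(volume_id)s could not be found.'


assert str(VolumeNotFound(volume_id='vol-1')) == (
    'Volume vol-1 could not be found.')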
crossvul-python_data_good_3693_3
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Main entry point into the Token service.""" import datetime from keystone import config from keystone import exception from keystone.common import manager CONF = config.CONF config.register_int('expiration', group='token', default=86400) class Manager(manager.Manager): """Default pivot point for the Token backend. See :mod:`keystone.common.manager.Manager` for more details on how this dynamically calls the backend. """ def __init__(self): super(Manager, self).__init__(CONF.token.driver) class Driver(object): """Interface description for a Token driver.""" def get_token(self, token_id): """Get a token by id. :param token_id: identity of the token :type token_id: string :returns: token_ref :raises: keystone.exception.TokenNotFound """ raise exception.NotImplemented() def create_token(self, token_id, data): """Create a token by id and data. :param token_id: identity of the token :type token_id: string :param data: dictionary with additional reference information :: { expires='' id=token_id, user=user_ref, tenant=tenant_ref, metadata=metadata_ref } :type data: dict :returns: token_ref or None. """ raise exception.NotImplemented() def delete_token(self, token_id): """Deletes a token by id. :param token_id: identity of the token :type token_id: string :returns: None. :raises: keystone.exception.TokenNotFound """ raise exception.NotImplemented() def list_tokens(self, user_id): """Returns a list of current token_id's for a user :param user_id: identity of the user :type user_id: string :returns: list of token_id's """ raise exception.NotImplemented() def _get_default_expire_time(self): """Determine when a token should expire based on the config. :returns: a naive utc datetime.datetime object """ expire_delta = datetime.timedelta(seconds=CONF.token.expiration) return datetime.datetime.utcnow() + expire_delta
./CrossVul/dataset_final_sorted/CWE-264/py/good_3693_3
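# A tiny runnable restatement of Driver._get_default_expire_time() above,
# with the [token] expiration option (default 86400 seconds, per the
# register_int call) passed in explicitly instead of read from CONF.

import datetime


def default_expire_time(expiration=86400):
    return datetime.datetime.utcnow() + datetime.timedelta(seconds=expiration)


expires_at = default_expire_time()   # naive UTC datetime one day from now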