code (string, lengths 51 to 2.38k)
docstring (string, lengths 4 to 15.2k)
def set_peer_link(self, value=None, default=False, disable=False): return self._configure_mlag('peer-link', value, default, disable)
Configures the mlag peer-link value Args: value (str): The value to configure the peer-link default (bool): Configures the peer-link using the default keyword disable (bool): Negates the peer-link using the no keyword Returns: bool: Returns True if the commands complete successfully
def _css_helper(self):
    entries = [entry for entry in self._plugin_manager.call_hook("css") if entry is not None]
    entries += self._get_ctx()["css"]
    entries = ["<link href='" + entry + "' rel='stylesheet'>" for entry in entries]
    return "\n".join(entries)
Add CSS links for the current page and for the plugins
def _get_stats_from_socket(self, name): try: json_blob = subprocess.check_output( [self.config['ceph_binary'], '--admin-daemon', name, 'perf', 'dump', ]) except subprocess.CalledProcessError as err: self.log.info('Could not get stats from %s: %s', name, err) self.log.exception('Could not get stats from %s' % name) return {} try: json_data = json.loads(json_blob) except Exception as err: self.log.info('Could not parse stats from %s: %s', name, err) self.log.exception('Could not parse stats from %s' % name) return {} return json_data
Return the parsed JSON data returned when ceph is told to dump the stats from the named socket. In the event of an error, the exception is logged, and an empty result set is returned.
def inventory(self, all=False, ssid=None): if all or self.api_key is None: if ssid is not None: return self._ssid_inventory(self.full_inventory, ssid) else: return self.full_inventory else: if ssid is not None: return self._ssid_inventory(self.self_inventory, ssid) else: return self.self_inventory
Returns a node inventory. If an API key is specified, only the nodes provisioned by this key will be returned. :return: { inventory }
def __store_cash_balances_per_currency(self, cash_balances):
    cash = self.model.get_cash_asset_class()
    for cur_symbol in cash_balances:
        item = CashBalance(cur_symbol)
        item.parent = cash
        quantity = cash_balances[cur_symbol]["total"]
        item.value = Decimal(quantity)
        item.currency = cur_symbol
        cash.stocks.append(item)
        self.model.stocks.append(item)
Store balance per currency as Stock records under Cash class
def _create_response_record(self, response):
    record = dict()
    record['id'] = response['id']
    record['type'] = response['type']
    record['name'] = self._full_name(response['name'])
    if 'content' in response:
        record['content'] = response['content'] or ""
    if 'ttl' in response:
        record['ttl'] = response['ttl']
    if 'prio' in response:
        record['priority'] = response['prio']
    return record
Creates record for lexicon API calls
def calcDrawingProbs(self):
    wmg = self.wmg
    phi = self.phi
    weights = []
    for i in range(0, len(wmg.keys())):
        weights.append(phi**i)
    totalWeight = sum(weights)
    for i in range(0, len(wmg.keys())):
        weights[i] = weights[i] / totalWeight
    return weights
Returns a vector that contains the probability of an item being drawn from each position. We say that every item in an order vector is drawn with weight phi^i where i is its position.
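A minimal standalone sketch of the same phi^i weighting, using illustrative values (three items and phi = 0.5 are assumptions for the example; `wmg` and `phi` above come from the surrounding class):

phi = 0.5          # assumed dispersion parameter for the example
num_items = 3      # assumed number of items in the order vector

weights = [phi ** i for i in range(num_items)]   # [1.0, 0.5, 0.25]
total = sum(weights)                             # 1.75
probs = [w / total for w in weights]

print(probs)  # [0.571..., 0.285..., 0.142...] -- sums to 1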
def generate(env): SCons.Tool.createSharedLibBuilder(env) SCons.Tool.createProgBuilder(env) env['LINK'] = '$CC' env['LINKFLAGS'] = SCons.Util.CLVar('') env['LINKCOM'] = '$LINK -q $LINKFLAGS -e$TARGET $SOURCES $LIBS' env['LIBDIRPREFIX']='' env['LIBDIRSUFFIX']='' env['LIBLINKPREFIX']='' env['LIBLINKSUFFIX']='$LIBSUFFIX'
Add Builders and construction variables for Borland ilink to an Environment.
def timedelta_seconds(timedelta): return (timedelta.total_seconds() if hasattr(timedelta, "total_seconds") else timedelta.days * 24 * 3600 + timedelta.seconds + timedelta.microseconds / 1000000.)
Returns the total timedelta duration in seconds.
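As a rough usage sketch, the fallback arithmetic above matters only on very old Python versions that lack `timedelta.total_seconds()`; on current interpreters both paths agree:

from datetime import timedelta

td = timedelta(days=1, seconds=30, microseconds=500000)
# Manual fallback, equivalent to td.total_seconds() on modern Python
manual = td.days * 24 * 3600 + td.seconds + td.microseconds / 1000000.
print(manual, td.total_seconds())  # 86430.5 86430.5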
def group(self):
    split_count = self._url.lower().find("/content/")
    len_count = len('/content/')
    gURL = self._url[:self._url.lower().find("/content/")] + \
        "/community/" + self._url[split_count + len_count:]
    return CommunityGroup(url=gURL,
                          securityHandler=self._securityHandler,
                          proxy_url=self._proxy_url,
                          proxy_port=self._proxy_port)
returns the community.Group class for the current group
def get_thermostat_state_by_id(self, id_): return next((state for state in self.thermostat_states if state.id == id_), None)
Retrieves a thermostat state object by its id :param id_: The id of the thermostat state :return: The thermostat state object
def get_cluster_port_names(self, cluster_name):
    port_names = list()
    for host_name in self.get_hosts_by_clusters()[cluster_name]:
        port_names.extend(self.get_hosts_by_name(host_name))
    return port_names
return a list of the port names under an XIV Cluster
def find_link(self, href_pattern, make_absolute=True): if make_absolute: self.tree.make_links_absolute(self.doc.url) if isinstance(href_pattern, six.text_type): raise GrabMisuseError('Method `find_link` accepts only ' 'byte-string argument') href_pattern = make_unicode(href_pattern) for elem, _, link, _ in self.tree.iterlinks(): if elem.tag == 'a' and href_pattern in link: return link return None
Find link in response body which href value matches ``href_pattern``. Returns found url or None.
def update(self, **kwargs):
    ret_val = super(ManagerUtilsQuerySet, self).update(**kwargs)
    post_bulk_operation.send(sender=self.model, model=self.model)
    return ret_val
Overrides Django's update method to emit a post_bulk_operation signal when it completes.
def unsubscribe(self, topic):
    if self.sock == NC.INVALID_SOCKET:
        return NC.ERR_NO_CONN
    self.logger.info("UNSUBSCRIBE: %s", topic)
    return self.send_unsubscribe(False, [utf8encode(topic)])
Unsubscribe from a topic.
def write(notebook, file_or_stream, fmt, version=nbformat.NO_CONVERT, **kwargs):
    text = u'' + writes(notebook, fmt, version, **kwargs)
    file_or_stream.write(text)
    if not text.endswith(u'\n'):
        file_or_stream.write(u'\n')
Write a notebook to a file
def _parse_signed_int_components(buf):
    sign_bit = 0
    value = 0
    first = True
    while True:
        ch = buf.read(1)
        if ch == b'':
            break
        octet = ord(ch)
        if first:
            if octet & _SIGNED_INT_SIGN_MASK:
                sign_bit = 1
            value = octet & _SIGNED_INT_SIGN_VALUE_MASK
            first = False
        else:
            value <<= 8
            value |= octet
    return sign_bit, value
Parses the remainder of a file-like object as a signed magnitude value. Returns: Returns a pair of the sign bit and the unsigned magnitude.
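A self-contained sketch of the same sign-and-magnitude parse, with the two masks spelled out (0x80 and 0x7F are assumed here, the conventional choice when the high bit of the first octet is the sign; the real module-level constants may differ):

import io

SIGN_MASK = 0x80        # assumed: high bit of the first octet carries the sign
SIGN_VALUE_MASK = 0x7F  # assumed: remaining seven bits start the magnitude

def parse_signed_magnitude(buf):
    sign_bit, value, first = 0, 0, True
    while True:
        ch = buf.read(1)
        if ch == b'':
            break
        octet = ch[0]
        if first:
            sign_bit = 1 if octet & SIGN_MASK else 0
            value = octet & SIGN_VALUE_MASK
            first = False
        else:
            value = (value << 8) | octet
    return sign_bit, value

print(parse_signed_magnitude(io.BytesIO(b'\x81\x02')))  # (1, 258)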
def rprof(self):
    if self.istep not in self.sdat.rprof.index.levels[0]:
        return None
    return self.sdat.rprof.loc[self.istep]
Radial profiles data of the time step. Set to None if no radial profiles data is available for this time step.
def dict_stack(dict_list, key_prefix=''):
    dict_stacked_ = defaultdict(list)
    for dict_ in dict_list:
        for key, val in six.iteritems(dict_):
            dict_stacked_[key_prefix + key].append(val)
    dict_stacked = dict(dict_stacked_)
    return dict_stacked
r""" stacks values from two dicts into a new dict where the values are list of the input values. the keys are the same. DEPRICATE in favor of dict_stack2 Args: dict_list (list): list of dicts with similar keys Returns: dict dict_stacked CommandLine: python -m utool.util_dict --test-dict_stack python -m utool.util_dict --test-dict_stack:1 Example: >>> # ENABLE_DOCTEST >>> from utool.util_dict import * # NOQA >>> import utool as ut >>> dict1_ = {'a': 1, 'b': 2} >>> dict2_ = {'a': 2, 'b': 3, 'c': 4} >>> dict_stacked = dict_stack([dict1_, dict2_]) >>> result = ut.repr2(dict_stacked, sorted_=True) >>> print(result) {'a': [1, 2], 'b': [2, 3], 'c': [4]} Example1: >>> # ENABLE_DOCTEST >>> from utool.util_dict import * # NOQA >>> import utool as ut >>> # Get equivalent behavior with dict_stack2? >>> # Almost, as long as None is not part of the list >>> dict1_ = {'a': 1, 'b': 2} >>> dict2_ = {'a': 2, 'b': 3, 'c': 4} >>> dict_stacked_ = dict_stack2([dict1_, dict2_]) >>> dict_stacked = {key: ut.filter_Nones(val) for key, val in dict_stacked_.items()} >>> result = ut.repr2(dict_stacked, sorted_=True) >>> print(result) {'a': [1, 2], 'b': [2, 3], 'c': [4]}
def within_polygon(self, polygon, distance=None, **kwargs): if distance: zone_polygon = polygon.dilate(distance) else: zone_polygon = polygon upper_depth, lower_depth = _check_depth_limits(kwargs) valid_depth = np.logical_and( self.catalogue.data['depth'] >= upper_depth, self.catalogue.data['depth'] < lower_depth) catalogue_mesh = Mesh(self.catalogue.data['longitude'], self.catalogue.data['latitude'], self.catalogue.data['depth']) valid_id = np.logical_and(valid_depth, zone_polygon.intersects(catalogue_mesh)) return self.select_catalogue(valid_id)
Select earthquakes within polygon :param polygon: Centre point as instance of nhlib.geo.polygon.Polygon class :param float distance: Buffer distance (km) (can take negative values) :returns: Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue` containing only selected events
def annotate(self, sent): preds = [] words = [] for word, fv in self.sent2examples(sent): probs = self.predictor(fv) tags = probs.argsort() tag = self.ID_TAG[tags[-1]] words.append(word) preds.append(tag) annotations = zip(words, preds) return annotations
Annotate a sequence of words with entity tags. Args: sent: sequence of strings/words.
def generate(variables, template):
    env = jinja2.Environment(undefined=jinja2.StrictUndefined)
    for c in expand(variables):
        c['rc'] = rc
        yield env.from_string(template).render(c)
Yields a resolved "template" for each config set and dumps on output This function will extrapolate the ``template`` file using the contents of ``variables`` and will output individual (extrapolated, expanded) files in the output directory ``output``. Parameters: variables (str): A string stream containing the variables to parse, in YAML format as explained on :py:func:`expand`. template (str): A string stream containing the template to extrapolate Yields: str: A generated template you can save Raises: jinja2.UndefinedError: if a variable used in the template is undefined
def transform(self, data=None): if data is None: return self.xform_data else: formatted = format_data( data, semantic=self.semantic, vectorizer=self.vectorizer, corpus=self.corpus, ppca=True) norm = normalizer(formatted, normalize=self.normalize) reduction = reducer( norm, reduce=self.reduce, ndims=self.reduce['params']['n_components']) return aligner(reduction, align=self.align)
Return transformed data, or transform new data using the same model parameters Parameters ---------- data : numpy array, pandas dataframe or list of arrays/dfs The data to transform. If no data is passed, the xform_data from the DataGeometry object will be returned. Returns ---------- xformed_data : list of numpy arrays The transformed data
def _function_add_node(self, cfg_node, function_addr):
    snippet = self._to_snippet(cfg_node=cfg_node)
    self.kb.functions._add_node(function_addr, snippet)
Adds node to function manager, converting address to CodeNode if possible :param CFGNode cfg_node: A CFGNode instance. :param int function_addr: Address of the current function. :return: None
def _py_ex_argtype(executable): result = [] for p in executable.ordered_parameters: atypes = p.argtypes if atypes is not None: result.extend(p.argtypes) else: print(("No argtypes for: {}".format(p.definition()))) if type(executable).__name__ == "Function": result.extend(executable.argtypes) return result
Returns the code to create the argtype to assign to the method's argtypes attribute.
def check_config(self, config, name=''): config = config.get(self.config_name, {}) extras = set(config.keys()).difference(self.default_config) if 'config' not in self.services and extras: raise ConfigurationError( 'Unsupported config options for "%s": %s' % (self.config_name, ', '.join(extras))) missing = set(self.default_config).difference(config) if missing: raise ConfigurationError( 'Missing config options for "%s": %s' % (self.config_name, ', '.join(missing))) duplicates = set(config.keys()).intersection(set(self.services)) if duplicates: raise ConfigurationError( 'Disallowed config options for "%s": %s' % (self.config_name, ', '.join(duplicates)))
Check that the configuration for this object is valid. This is a more restrictive check than for most :mod:`yakonfig` objects. It will raise :exc:`yakonfig.ConfigurationError` if `config` contains any keys that are not in the underlying callable's parameter list (that is, extra unused configuration options). This will also raise an exception if `config` contains keys that duplicate parameters that should be provided by the factory. .. note:: This last behavior is subject to change; future versions of the library may allow configuration to provide local configuration for a factory-provided object. :param dict config: the parent configuration dictionary, probably contains :attr:`config_name` as a key :param str name: qualified name of this object in the configuration :raise: :exc:`yakonfig.ConfigurationError` if excess parameters exist
def skipline(self):
    position = self.tell()
    prefix = self._fix()
    self.seek(prefix, 1)
    suffix = self._fix()
    if prefix != suffix:
        raise IOError(_FIX_ERROR)
    return position, prefix
Skip the next line and return its position and size. Raises IOError if the prefix and suffix of the line do not match.
def unmatched_quotes_in_line(text):
    text = text.replace("\\'", "")
    text = text.replace('\\"', '')
    if text.count('"') % 2:
        return '"'
    elif text.count("'") % 2:
        return "'"
    else:
        return ''
Return whether a string has open quotes. This simply counts whether the number of quote characters of either type in the string is odd. Taken from the IPython project (in IPython/core/completer.py in v0.13) Spyder team: Add some changes to deal with escaped quotes - Copyright (C) 2008-2011 IPython Development Team - Copyright (C) 2001-2007 Fernando Perez. <fperez@colorado.edu> - Copyright (C) 2001 Python Software Foundation, www.python.org Distributed under the terms of the BSD License.
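A quick usage sketch of the quote-parity idea, re-declaring the helper above so the snippet runs on its own:

def unmatched_quotes_in_line(text):
    # Escaped quotes are stripped first so they do not affect the parity count.
    text = text.replace("\\'", "").replace('\\"', '')
    if text.count('"') % 2:
        return '"'
    if text.count("'") % 2:
        return "'"
    return ''

print(unmatched_quotes_in_line('print("hello'))   # '"'  -- an open double quote
print(unmatched_quotes_in_line("it\\'s fine"))     # ''   -- escaped quote is ignored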
def get_processing_block_ids():
    ids = []
    for key in sorted(DB.keys(pattern='scheduling_block/*')):
        config = json.loads(DB.get(key))
        for processing_block in config['processing_blocks']:
            ids.append(processing_block['id'])
    return ids
Return an array of Processing Block ids
def _handle_info(self, *args, **kwargs): if 'version' in kwargs: self.api_version = kwargs['version'] print("Initialized API with version %s" % self.api_version) return try: info_code = str(kwargs['code']) except KeyError: raise FaultyPayloadError("_handle_info: %s" % kwargs) if not info_code.startswith('2'): raise ValueError("Info Code must start with 2! %s", kwargs) output_msg = "_handle_info(): %s" % kwargs log.info(output_msg) try: self._code_handlers[info_code]() except KeyError: raise UnknownWSSInfo(output_msg)
Handles info messages and executes the corresponding code
def fetch(self): returnResults = [] results = self._query.run() for result in results: if self._join: item = self._model.fromRawEntry(**result["left"]) joined = self._join.fromRawEntry(**result["right"]) item.protectedItems = self._joinedField item[self._joinedField] = joined else: item = self._model.fromRawEntry(**result) returnResults.append(item) self._documents = returnResults return self._documents
Fetches the query and then tries to wrap the data in the model, joining as needed, if applicable.
def system(self):
    url = self._url + "/system"
    return _System(url=url,
                   securityHandler=self._securityHandler,
                   proxy_url=self._proxy_url,
                   proxy_port=self._proxy_port)
Creates a reference to the System operations for Portal
def get_endpoints_using_catalog_api(domain, token): headers = {"X-App-Token": token} uri = "http://api.us.socrata.com/api/catalog/v1?domains={0}&offset={1}&limit=1000" ret = [] endpoints_thus_far = set() offset = 0 while True: try: r = requests.get(uri.format(domain, offset), headers=headers) r.raise_for_status() except requests.HTTPError: raise requests.HTTPError("An HTTP error was raised during Socrata API ingestion.".format(domain)) data = r.json() endpoints_returned = {r['resource']['id'] for r in data['results']} new_endpoints = endpoints_returned.difference(endpoints_thus_far) if len(new_endpoints) >= 999: ret += data['results'] endpoints_thus_far.update(new_endpoints) offset += 1000 continue else: ret += [r for r in data['results'] if r['resource']['id'] in new_endpoints] break return ret
Implements a raw HTTP GET against the entire Socrata portal for the domain in question. This method uses the second of the two ways of getting this information, the catalog API. Parameters ---------- domain: str A Socrata data portal domain. "data.seattle.gov" or "data.cityofnewyork.us" for example. token: str A Socrata application token. Application tokens can be registered by going onto the Socrata portal in question, creating an account, logging in, going to developer tools, and spawning a token. Returns ------- Portal dataset metadata from the catalog API.
def warp_locations(locations, y_center=None, return_ellipsoid=False, verbose=False): locations = np.asarray(locations) if y_center is None: c, r = _fit_ellipsoid_full(locations) else: c, r = _fit_ellipsoid_partial(locations, y_center) elliptic_locations = _project_on_ellipsoid(c, r, locations) if verbose: print('Head ellipsoid center:', c) print('Head ellipsoid radii:', r) distance = np.sqrt(np.sum((locations - elliptic_locations)**2, axis=1)) print('Minimum electrode displacement:', np.min(distance)) print('Average electrode displacement:', np.mean(distance)) print('Maximum electrode displacement:', np.max(distance)) spherical_locations = (elliptic_locations - c) / r if return_ellipsoid: return spherical_locations, c, r return spherical_locations
Warp EEG electrode locations to spherical layout. EEG Electrodes are warped to a spherical layout in three steps: 1. An ellipsoid is least-squares-fitted to the electrode locations. 2. Electrodes are displaced to the nearest point on the ellipsoid's surface. 3. The ellipsoid is transformed to a sphere, causing the new locations to lie exactly on a spherical surface with unit radius. This procedure intends to minimize electrode displacement in the original coordinate space. Simply projecting electrodes on a sphere (e.g. by normalizing the x/y/z coordinates) typically gives much larger displacements. Parameters ---------- locations : array-like, shape = [n_electrodes, 3] Eeach row of `locations` corresponds to the location of an EEG electrode in cartesian x/y/z coordinates. y_center : float, optional Fix the y-coordinate of the ellipsoid's center to this value (optional). This is useful to align the ellipsoid with the central electrodes. return_ellipsoid : bool, optional If `true` center and radii of the ellipsoid are returned. Returns ------- newlocs : array-like, shape = [n_electrodes, 3] Electrode locations on unit sphere. c : array-like, shape = [3], (only returned if `return_ellipsoid` evaluates to `True`) Center of the ellipsoid in the original location's coordinate space. r : array-like, shape = [3], (only returned if `return_ellipsoid` evaluates to `True`) Radii (x, y, z) of the ellipsoid in the original location's coordinate space.
def all_library_calls(self):
    if self._all_library_calls is None:
        self._all_library_calls = self._explore_functions(lambda x: x.library_calls)
    return self._all_library_calls
recursive version of library calls
def reply_inform(cls, req_msg, *args): return cls(cls.INFORM, req_msg.name, args, req_msg.mid)
Helper method for creating inform messages in reply to a request. Copies the message name and message identifier from the request message. Parameters ---------- req_msg : katcp.core.Message instance The request message that this inform is in reply to args : list of strings The message arguments except name
def cancel_room(self, booking_id):
    resp = self._request("POST", "/1.1/space/cancel/{}".format(booking_id))
    return resp.json()
Cancel a room given a booking id. :param booking_id: A booking id or a list of booking ids (separated by commas) to cancel. :type booking_id: str
def build(self): if self.colour: embed = discord.Embed( title=self.title, type='rich', description=self.description, colour=self.colour) else: embed = discord.Embed( title=self.title, type='rich', description=self.description) if self.thumbnail: embed.set_thumbnail(url=self.thumbnail) if self.image: embed.set_image(url=self.image) embed.set_author( name="Modis", url="https://musicbyango.com/modis/", icon_url="http://musicbyango.com/modis/dp/modis64t.png") for pack in self.datapacks: embed.add_field( name=pack[0], value=pack[1], inline=pack[2] ) return embed
Builds Discord embed GUI Returns: discord.Embed: Built GUI
def printParams(paramDictionary, all=False, log=None): if log is not None: def output(msg): log.info(msg) else: def output(msg): print(msg) if not paramDictionary: output('No parameters were supplied') else: for key in sorted(paramDictionary): if all or (not isinstance(paramDictionary[key], dict)) \ and key[0] != '_': output('\t' + '\t'.join([str(key) + ' :', str(paramDictionary[key])])) if log is None: output('\n')
Print nicely the parameters from the dictionary.
def check_num(self, checks, radl): prefixes = {} for f in self.features: if not isinstance(f, Feature): continue (prefix, sep, tail) = f.prop.partition(".") if not sep or prefix not in checks: continue checks0 = checks[prefix] (num, sep, suffix) = tail.partition(".") try: num = int(num) except: raise RADLParseException( "Invalid property name; expected an index.", line=f.line) if not sep or suffix not in checks0: continue f._check(checks0[suffix], radl) if prefix not in prefixes: prefixes[prefix] = set() prefixes[prefix].add(num) for prefix, nums in prefixes.items(): if min(nums) != 0 or max(nums) != len(nums) - 1: raise RADLParseException( "Invalid indices values in properties '%s'" % prefix) return prefixes
Check types, operators and units in features with numbers. Args: - checks(dict of dict of str:tuples): keys are property name prefixes, and the values are dict with keys are property name suffixes and values are iterable as in ``_check_feature``. - radl: passed to ``_check_feature``.
def template(self):
    s = Template(self._IPTABLES_TEMPLATE)
    return s.substitute(filtertable='\n'.join(self.filters),
                        rawtable='\n'.join(self.raw),
                        mangletable='\n'.join(self.mangle),
                        nattable='\n'.join(self.nat),
                        date=datetime.today())
Create a rules file in iptables-restore format
def bb_get_instr_max_width(basic_block):
    asm_mnemonic_max_width = 0
    for instr in basic_block:
        if len(instr.mnemonic) > asm_mnemonic_max_width:
            asm_mnemonic_max_width = len(instr.mnemonic)
    return asm_mnemonic_max_width
Get maximum instruction mnemonic width
def _get_object_key(self, p_object): matched_key = None matched_index = None if hasattr(p_object, self._searchNames[0]): return getattr(p_object, self._searchNames[0]) for x in xrange(len(self._searchNames)): key = self._searchNames[x] if hasattr(p_object, key): matched_key = key matched_index = x if matched_key is None: raise KeyError() if matched_index != 0 and self._searchOptimize: self._searchNames.insert(0, self._searchNames.pop(matched_index)) return getattr(p_object, matched_key)
Get key from object
def grep_file(query, item): return ['%s: %s' % (item, line) for line in open(item) if re.search(query, line)]
This function performs the actual grep on a given file.
def _make_sync_method(name):
    def sync_wrapper(self, *args, **kwds):
        method = getattr(self, name)
        future = method(*args, **kwds)
        return future.get_result()
    return sync_wrapper
Helper to synthesize a synchronous method from an async method name. Used by the @add_sync_methods class decorator below. Args: name: The name of the synchronous method. Returns: A method (with first argument 'self') that retrieves and calls self.<name>, passing its own arguments, expects it to return a Future, and then waits for and returns that Future's result.
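A toy sketch of the same wrapping pattern outside its original library (the `Future` and `Client` classes below are invented purely for illustration and are not part of the source API):

def _make_sync_method(name):
    def sync_wrapper(self, *args, **kwds):
        return getattr(self, name)(*args, **kwds).get_result()
    return sync_wrapper

class Future:                      # hypothetical stand-in for an async result
    def __init__(self, value):
        self._value = value
    def get_result(self):
        return self._value

class Client:
    def fetch_async(self, key):    # "async" method returning a Future
        return Future('value-for-%s' % key)
    fetch = _make_sync_method('fetch_async')   # synthesized synchronous counterpart

print(Client().fetch('abc'))  # value-for-abc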
def save_credentials(self, profile):
    filename = profile_path(S3_PROFILE_ID, profile)
    creds = {
        "access_key": self.access_key,
        "secret_key": self.secret_key
    }
    dump_to_json(filename, creds)
Saves credentials to a dotfile so you can grab them later. Parameters ---------- profile: str name for your profile (i.e. "dev", "prod")
def _create_serial_ports(serial_ports): ports = [] keys = range(-9000, -9050, -1) if serial_ports: devs = [serial['adapter'] for serial in serial_ports] log.trace('Creating serial ports %s', devs) for port, key in zip(serial_ports, keys): serial_port_device = _apply_serial_port(port, key, 'add') ports.append(serial_port_device) return ports
Returns a list of vim.vm.device.VirtualDeviceSpec objects representing the serial ports to be created for a virtual machine serial_ports Serial port properties
def warning(self, msg, indent=0, **kwargs): return self.logger.warning(self._indent(msg, indent), **kwargs)
invoke ``self.logger.warning``
def get_top_tags(self, limit=None, cacheable=True): doc = _Request(self, "tag.getTopTags").execute(cacheable) seq = [] for node in doc.getElementsByTagName("tag"): if limit and len(seq) >= limit: break tag = Tag(_extract(node, "name"), self) weight = _number(_extract(node, "count")) seq.append(TopItem(tag, weight)) return seq
Returns the most used tags as a sequence of TopItem objects.
def set_extent_location(self, new_location, main_vd_extent, reserve_vd_extent): if not self._initialized: raise pycdlibexception.PyCdlibInternalError('UDF Anchor Volume Structure not yet initialized') self.new_extent_loc = new_location self.desc_tag.tag_location = new_location self.main_vd_extent = main_vd_extent self.reserve_vd_extent = reserve_vd_extent
A method to set a new location for this Anchor Volume Structure. Parameters: new_location - The new extent that this Anchor Volume Structure should be located at. main_vd_extent - The extent containing the main Volume Descriptors. reserve_vd_extent - The extent containing the reserve Volume Descriptors. Returns: Nothing.
def destination(self): return os.path.join(os.path.abspath(self.outdir), self.filename)
Get the destination path. This property should be recalculated every time it is used because a user could change the outdir and filename dynamically.
def write(self, obj, **kwargs): super().write(obj, **kwargs) for name, ss in obj.items(): key = 'sparse_series_{name}'.format(name=name) if key not in self.group._v_children: node = self._handle.create_group(self.group, key) else: node = getattr(self.group, key) s = SparseSeriesFixed(self.parent, node) s.write(ss) self.attrs.default_fill_value = obj.default_fill_value self.attrs.default_kind = obj.default_kind self.write_index('columns', obj.columns)
write it as a collection of individual sparse series
def add2python(self, module=None, up=0, down=None, front=False, must_exist=True): if module: try: return import_module(module) except ImportError: pass dir = self.dir().ancestor(up) if down: dir = dir.join(*down) if dir.isdir(): if dir not in sys.path: if front: sys.path.insert(0, dir) else: sys.path.append(dir) elif must_exist: raise ImportError('Directory {0} not available'.format(dir)) else: return None if module: try: return import_module(module) except ImportError: if must_exist: raise
Add a directory to the python path. :parameter module: Optional module name to try to import once we have found the directory :parameter up: number of levels to go up the directory tree from :attr:`local_path`. :parameter down: Optional tuple of directory names to travel down once we have gone *up* levels. :parameter front: Boolean indicating if we want to insert the new path at the front of ``sys.path`` using ``sys.path.insert(0,path)``. :parameter must_exist: Boolean indicating if the module must exist.
def _linux_stp(br, state):
    brctl = _tool_path('brctl')
    return __salt__['cmd.run']('{0} stp {1} {2}'.format(brctl, br, state),
                               python_shell=False)
Internal, sets STP state
def fail_request(self, orig_request, message, start_response):
    cors_handler = self._create_cors_handler(orig_request)
    return util.send_wsgi_error_response(
        message, start_response, cors_handler=cors_handler)
Write an immediate failure response to outfile, no redirect. This calls start_response and returns the error body. Args: orig_request: An ApiRequest, the original request from the user. message: A string containing the error message to be displayed to user. start_response: A function with semantics defined in PEP-333. Returns: A string containing the body of the error response.
def adj_nodes_aws(aws_nodes): for node in aws_nodes: node.cloud = "aws" node.cloud_disp = "AWS" node.private_ips = ip_to_str(node.private_ips) node.public_ips = ip_to_str(node.public_ips) node.zone = node.extra['availability'] node.size = node.extra['instance_type'] node.type = node.extra['instance_lifecycle'] return aws_nodes
Adjust details specific to AWS.
def timestamp(num_params, p_levels, k_choices, N): string = "_v%s_l%s_gs%s_k%s_N%s_%s.txt" % (num_params, p_levels, k_choices, N, dt.strftime(dt.now(), "%d%m%y%H%M%S")) return string
Returns a uniform timestamp with parameter values for file identification
def mod(self):
    if self._mod is None:
        self._mod = self.compile_and_import_binary()
    return self._mod
Cached compiled binary of the Generic_Code class. To clear cache invoke :meth:`clear_mod_cache`.
def get_users(self, fetch=True): return Users(self.resource.users, self.client, populate=fetch)
Return this Application's users object, populating it if fetch is True.
def setCmd(self, cmd): cmd = cmd.upper() if cmd not in VALID_COMMANDS: raise FrameError("The cmd '%s' is not valid! It must be one of '%s' (STOMP v%s)." % ( cmd, VALID_COMMANDS, STOMP_VERSION) ) else: self._cmd = cmd
Check the cmd is valid, FrameError will be raised if its not.
def expression_list_to_conjunction(expression_list): if not isinstance(expression_list, list): raise AssertionError(u'Expected `list`, Received {}.'.format(expression_list)) if len(expression_list) == 0: return TrueLiteral if not isinstance(expression_list[0], Expression): raise AssertionError(u'Non-Expression object {} found in expression_list' .format(expression_list[0])) if len(expression_list) == 1: return expression_list[0] else: return BinaryComposition(u'&&', expression_list_to_conjunction(expression_list[1:]), expression_list[0])
Convert a list of expressions to an Expression that is the conjunction of all of them.
def modify(self, vals):
    self.vals = vals.view(np.ndarray).copy()
    y = self.model.predict(self.vals)[0]
    self.data_visualize.modify(y)
    self.latent_handle.set_data(self.vals[0, self.latent_index[0]],
                                self.vals[0, self.latent_index[1]])
    self.axes.figure.canvas.draw()
When latent values are modified, update the latent representation and also update the output visualization.
def to_feature(self, name=None, feature_type='misc_feature'): if name is None: if not self.name: raise ValueError('name attribute missing from DNA instance' ' and arguments') name = self.name return Feature(name, start=0, stop=len(self), feature_type=feature_type)
Create a feature from the current object. :param name: Name for the new feature. Must be specified if the DNA instance has no .name attribute. :type name: str :param feature_type: The type of feature (genbank standard). :type feature_type: str
def account(transition, direction=Direction.BIDIRECTIONAL):
    if direction != Direction.BIDIRECTIONAL:
        return directed_account(transition, direction)
    return Account(directed_account(transition, Direction.CAUSE) +
                   directed_account(transition, Direction.EFFECT))
Return the set of all causal links for a |Transition|. Args: transition (Transition): The transition of interest. Keyword Args: direction (Direction): By default the account contains actual causes and actual effects.
def from_file(cls, filename="CTRL", **kwargs):
    with zopen(filename, "rt") as f:
        contents = f.read()
    return LMTOCtrl.from_string(contents, **kwargs)
Creates a CTRL file object from an existing file. Args: filename: The name of the CTRL file. Defaults to 'CTRL'. Returns: An LMTOCtrl object.
async def detach_tip(data): global session if not feature_flags.use_protocol_api_v2(): pipette = session.pipettes[session.current_mount] if not pipette.tip_attached: log.warning('detach tip called with no tip') pipette._remove_tip(session.tip_length) else: session.adapter.remove_tip(session.current_mount) if session.cp: session.cp = CriticalPoint.NOZZLE session.tip_length = None return web.json_response({'message': "Tip removed"}, status=200)
Detach the tip from the current pipette :param data: Information obtained from a POST request. The content type is application/json. The correct packet form should be as follows: { 'token': UUID token from current session start 'command': 'detach tip' }
def attention_lm_decoder(decoder_input, decoder_self_attention_bias, hparams, name="decoder"): x = decoder_input with tf.variable_scope(name): for layer in range(hparams.num_hidden_layers): with tf.variable_scope("layer_%d" % layer): with tf.variable_scope("self_attention"): y = common_attention.multihead_attention( common_layers.layer_preprocess( x, hparams), None, decoder_self_attention_bias, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout) x = common_layers.layer_postprocess(x, y, hparams) with tf.variable_scope("ffn"): y = common_layers.conv_hidden_relu( common_layers.layer_preprocess(x, hparams), hparams.filter_size, hparams.hidden_size, dropout=hparams.relu_dropout) x = common_layers.layer_postprocess(x, y, hparams) return common_layers.layer_preprocess(x, hparams)
A stack of attention_lm layers. Args: decoder_input: a Tensor decoder_self_attention_bias: bias Tensor for self-attention (see common_attention.attention_bias()) hparams: hyperparameters for model name: a string Returns: y: a Tensor
def partition_ordered(sequence, key=None): yield from ((k, list(g)) for k, g in groupby(sequence, key=key))
Partition ordered sequence by key. Sequence is expected to already be ordered. Parameters ---------- sequence: iterable data. key: partition key function Yields ------- iterable tuple(s) of partition key, data list pairs. Examples -------- 1. By object attributes. Partition sequence of objects by a height and weight attributes into an ordered dict. >> attributes = ('height', 'weight') >> OrderedDict(partition_ordered(sequence, attrgetter(*attributes))) 2. By index items. Partition sequence by the first character index of each element. >> index = 0 >> sequence = ['112', '124', '289', '220', 'Z23'] >> list(partition_ordered(sequence, itemgetter(index)))
def Rz_matrix(theta): return np.array([ [np.cos(theta), -np.sin(theta), 0], [np.sin(theta), np.cos(theta), 0], [0, 0, 1] ])
Rotation matrix around the Z axis
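As a quick sanity check of the rotation convention, a sketch rotating the x unit vector by 90 degrees (counter-clockwise about Z):

import numpy as np

def Rz_matrix(theta):
    # Counter-clockwise rotation about the Z axis by `theta` radians.
    return np.array([
        [np.cos(theta), -np.sin(theta), 0],
        [np.sin(theta),  np.cos(theta), 0],
        [0,              0,             1],
    ])

print(np.round(Rz_matrix(np.pi / 2) @ np.array([1, 0, 0]), 6))  # [0. 1. 0.]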
def _any(objs, query):
    for obj in objs:
        if isinstance(obj, Document):
            if _any(obj.roots, query):
                return True
        else:
            if any(query(ref) for ref in obj.references()):
                return True
    else:
        return False
Whether any of a collection of objects satisfies a given query predicate Args: objs (seq[Model or Document]) : query (callable) Returns: True, if ``query(obj)`` is True for some object in ``objs``, else False
def decrease_user_property(self, user_id, property_name, value=0,
                           headers=None, endpoint_url=None):
    endpoint_url = endpoint_url or self._endpoint_url
    url = endpoint_url + "/users/" + user_id + "/properties/" + \
        property_name + "/decrease/" + value.__str__()
    headers = headers or self._default_headers(content_type="")
    response = requests.post(url, headers=headers)
    return response
Decrease a user's property by a value. :param str user_id: identified user's ID :param str property_name: user property name to decrease :param number value: amount by which to decrease the property :param dict headers: custom request headers (if not set, default values are used) :param str endpoint_url: where to send the request (if not set, the default value is used) :return: Response
def frag2text(endpoint, stype, selector, clean=False, raw=False, verbose=False):
    try:
        return main(endpoint, stype, selector, clean, raw, verbose)
    except StandardError as err:
        return err
returns Markdown text of selected fragment. Args: endpoint: URL, file, or HTML string stype: { 'css' | 'xpath' } selector: CSS selector or XPath expression Returns: Markdown text Options: clean: cleans fragment (lxml.html.clean defaults) raw: returns raw HTML fragment verbose: show http status, encoding, headers
def _resolve_subkeys(key, separator="."):
    parts = key.split(separator, 1)
    if len(parts) > 1:
        return parts
    else:
        return parts[0], None
Resolve a potentially nested key. If the key contains the ``separator`` (e.g. ``.``) then the key will be split on the first instance of the subkey:: >>> _resolve_subkeys('a.b.c') ('a', 'b.c') >>> _resolve_subkeys('d|e|f', separator='|') ('d', 'e|f') If not, the subkey will be :data:`None`:: >>> _resolve_subkeys('foo') ('foo', None) Args: key (str): A string that may or may not contain the separator. separator (str): The namespace separator. Defaults to `.`. Returns: Tuple[str, str]: The key and subkey(s).
def generalized_negative_binomial(mu=1, alpha=1, shape=_Null, dtype=_Null, **kwargs): return _random_helper(_internal._random_generalized_negative_binomial, _internal._sample_generalized_negative_binomial, [mu, alpha], shape, dtype, kwargs)
Draw random samples from a generalized negative binomial distribution. Samples are distributed according to a generalized negative binomial distribution parametrized by *mu* (mean) and *alpha* (dispersion). *alpha* is defined as *1/k* where *k* is the failure limit of the number of unsuccessful experiments (generalized to real numbers). Samples will always be returned as a floating point data type. Parameters ---------- mu : float or Symbol, optional Mean of the negative binomial distribution. alpha : float or Symbol, optional Alpha (dispersion) parameter of the negative binomial distribution. shape : int or tuple of ints, optional The number of samples to draw. If shape is, e.g., `(m, n)` and `mu` and `alpha` are scalars, output shape will be `(m, n)`. If `mu` and `alpha` are Symbols with shape, e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each `[mu, alpha)` pair. dtype : {'float16', 'float32', 'float64'}, optional Data type of output samples. Default is 'float32' Returns ------- Symbol If input `shape` has dimensions, e.g., `(m, n)`, and `mu` and `alpha` are scalars, returned Symbol will resolve to shape `(m, n)`. If `mu` and `alpha` are Symbols with shape, e.g., `(x, y)`, returned Symbol will resolve to shape `(x, y, m, n)`, where `m*n` samples are drawn for each `[mu, alpha)` pair.
def warning_handler(self, handler):
    if not self.opened():
        handler = handler or util.noop
        self._warning_handler = enums.JLinkFunctions.LOG_PROTOTYPE(handler)
        self._dll.JLINKARM_SetWarnOutHandler(self._warning_handler)
Setter for the warning handler function. If the DLL is open, this function is a no-op, so it should be called prior to calling ``open()``. Args: self (JLink): the ``JLink`` instance handler (function): function to call on warning messages Returns: ``None``
def nansum(values, axis=None, skipna=True, min_count=0, mask=None): values, mask, dtype, dtype_max, _ = _get_values(values, skipna, 0, mask=mask) dtype_sum = dtype_max if is_float_dtype(dtype): dtype_sum = dtype elif is_timedelta64_dtype(dtype): dtype_sum = np.float64 the_sum = values.sum(axis, dtype=dtype_sum) the_sum = _maybe_null_out(the_sum, axis, mask, min_count=min_count) return _wrap_results(the_sum, dtype)
Sum the elements along an axis ignoring NaNs Parameters ---------- values : ndarray[dtype] axis: int, optional skipna : bool, default True min_count: int, default 0 mask : ndarray[bool], optional nan-mask if known Returns ------- result : dtype Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, 2, np.nan]) >>> nanops.nansum(s) 3.0
def merge_figures(figures): figure={} data=[] for fig in figures: for trace in fig['data']: data.append(trace) layout=get_base_layout(figures) figure['data']=data figure['layout']=layout return figure
Generates a single Figure from a list of figures Parameters: ----------- figures : list(Figures) List of figures to be merged.
def clear_masters(self):
    packages = []
    for mas in Utils().remove_dbs(self.packages):
        if mas not in self.dependencies:
            packages.append(mas)
    self.packages = packages
Clear master packages that already exist in the dependencies or that were added for installation two or more times
def is_empty(self): return (not self.breakpoint and not self.code_analysis and not self.todo and not self.bookmarks)
Return whether the block of user data is empty.
def initialize_remaining_constants(self, value=0): remaining = [] for node, _inputs, _outputs in self.iterate_bfs(): streams = node.input_streams() + [node.stream] for stream in streams: if stream.stream_type is not DataStream.ConstantType: continue if stream not in self.constant_database: self.add_constant(stream, value) remaining.append(stream) return remaining
Ensure that all constant streams referenced in the sensor graph have a value. Constant streams that are automatically created by the compiler are initialized as part of the compilation process but it's possible that the user references other constant streams but never assigns them an explicit initial value. This function will initialize them all to a default value (0 if not passed) and return the streams that were so initialized. Args: value (int): Optional value to use to initialize all uninitialized constants. Defaults to 0 if not passed. Returns: list(DataStream): A list of all of the constant streams that were not previously initialized and were initialized to the given value in this function.
def get_default(): if not is_configured(): raise JutException('No configurations available, please run `jut config add`') for configuration in _CONFIG.sections(): if _CONFIG.has_option(configuration, 'default'): return dict(_CONFIG.items(configuration))
return the attributes associated with the default configuration
def forward(self, channel, date_s, fragment):
    time_s, sep, nick = fragment.rpartition('.')
    time = datetime.datetime.strptime(time_s, '%H.%M.%S')
    date = datetime.datetime.strptime(date_s, '%Y-%m-%d')
    dt = datetime.datetime.combine(date, time.time())
    loc_dt = self.timezone.localize(dt)
    utc_dt = loc_dt.astimezone(pytz.utc)
    url_tmpl = '/day/{channel}/{target_date}'  # template appears truncated in the source
    url = url_tmpl.format(
        target_date=utc_dt.date().isoformat(),
        target_time=utc_dt.time().strftime('%H.%M.%S'),
        **locals()
    )
    raise cherrypy.HTTPRedirect(url, 301)
Given an HREF in the legacy timezone, redirect to an href for UTC.
def FilterMessages( self, Channel, FromID, ToID, Mode): try: res = self.__m_dllBasic.CAN_FilterMessages(Channel,FromID,ToID,Mode) return TPCANStatus(res) except: logger.error("Exception on PCANBasic.FilterMessages") raise
Configures the reception filter Remarks: The message filter will be expanded with every call to this function. If it is desired to reset the filter, please use the 'SetValue' function. Parameters: Channel : A TPCANHandle representing a PCAN Channel FromID : A c_uint value with the lowest CAN ID to be received ToID : A c_uint value with the highest CAN ID to be received Mode : A TPCANMode representing the message type (Standard, 11-bit identifier, or Extended, 29-bit identifier) Returns: A TPCANStatus error code
def _create_token(token_type, value, lineno, lexpos):
    token = lex.LexToken()
    token.type = token_type
    token.value = value
    token.lineno = lineno
    token.lexpos = lexpos
    return token
Helper for creating ply.lex.LexToken objects. Unfortunately, LexToken does not have a constructor defined to make setting these values easy.
def _accumulate_sufficient_statistics(self, stats, X, framelogprob, posteriors, fwdlattice, bwdlattice): stats['nobs'] += 1 if 's' in self.params: stats['start'] += posteriors[0] if 't' in self.params: n_samples, n_components = framelogprob.shape if n_samples <= 1: return log_xi_sum = np.full((n_components, n_components), -np.inf) _hmmc._compute_log_xi_sum(n_samples, n_components, fwdlattice, log_mask_zero(self.transmat_), bwdlattice, framelogprob, log_xi_sum) with np.errstate(under="ignore"): stats['trans'] += np.exp(log_xi_sum)
Updates sufficient statistics from a given sample. Parameters ---------- stats : dict Sufficient statistics as returned by :meth:`~base._BaseHMM._initialize_sufficient_statistics`. X : array, shape (n_samples, n_features) Sample sequence. framelogprob : array, shape (n_samples, n_components) Log-probabilities of each sample under each of the model states. posteriors : array, shape (n_samples, n_components) Posterior probabilities of each sample being generated by each of the model states. fwdlattice, bwdlattice : array, shape (n_samples, n_components) Log-forward and log-backward probabilities.
def find_steam_location():
    if registry is None:
        return None
    key = registry.CreateKey(registry.HKEY_CURRENT_USER, "Software\Valve\Steam")
    return registry.QueryValueEx(key, "SteamPath")[0]
Finds the location of the current Steam installation on Windows machines. Returns None for any non-Windows machines, or for Windows machines where Steam is not installed.
def deregister(): for type_, cls in get_pairs(): if type(units.registry.get(type_)) is cls: units.registry.pop(type_) for unit, formatter in _mpl_units.items(): if type(formatter) not in {DatetimeConverter, PeriodConverter, TimeConverter}: units.registry[unit] = formatter
Remove pandas' formatters and converters Removes the custom converters added by :func:`register`. This attempts to set the state of the registry back to the state before pandas registered its own units. Converters for pandas' own types like Timestamp and Period are removed completely. Converters for types pandas overwrites, like ``datetime.datetime``, are restored to their original value. See Also -------- deregister_matplotlib_converters
def print_rendered_results(results_dict):
    class _HubComponentEncoder(json.JSONEncoder):
        def default(self, o):
            if isinstance(o, base.HubComponent):
                return repr(o)
            return json.JSONEncoder.default(self, o)

    formatted = json.dumps(results_dict, indent=4, cls=_HubComponentEncoder)
    for s in formatted.splitlines():
        print(s.rstrip())
Pretty-prints the rendered results dictionary. Rendered results can be multiply-nested dictionaries; this uses JSON serialization to print a nice representation.
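A self-contained sketch of the same JSONEncoder trick, with a toy class standing in for HubComponent (the `Widget` class and `WidgetEncoder` below are invented for illustration only):

import json

class Widget:                       # hypothetical non-serializable object
    def __repr__(self):
        return '<Widget #42>'

class WidgetEncoder(json.JSONEncoder):
    def default(self, o):
        # Fall back to repr() for objects json cannot serialize natively.
        if isinstance(o, Widget):
            return repr(o)
        return json.JSONEncoder.default(self, o)

print(json.dumps({'result': Widget(), 'ok': True}, indent=4, cls=WidgetEncoder))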
def prepare_impact_function(self): impact_function = ImpactFunction() impact_function.callback = self.progress_callback impact_function.hazard = self.parent.hazard_layer impact_function.exposure = self.parent.exposure_layer aggregation = self.parent.aggregation_layer if aggregation: impact_function.aggregation = aggregation impact_function.use_selected_features_only = ( setting('useSelectedFeaturesOnly', False, bool)) else: impact_function.crs = self.extent.crs mode = setting('analysis_extents_mode') if self.extent.user_extent: wkt = self.extent.user_extent.asWkt() impact_function.requested_extent = wkt_to_rectangle(wkt) elif mode == HAZARD_EXPOSURE_VIEW: impact_function.requested_extent = ( self.iface.mapCanvas().extent()) elif mode == EXPOSURE: impact_function.use_exposure_view_only = True impact_function.debug_mode = False return impact_function
Create analysis as a representation of current situation of IFCW.
def update(self): stats = self.get_init_value() if self.input_method == 'local': stats['cpu'] = cpu_percent.get() stats['percpu'] = cpu_percent.get(percpu=True) stats['mem'] = psutil.virtual_memory().percent stats['swap'] = psutil.swap_memory().percent elif self.input_method == 'snmp': pass if cpuinfo_tag: cpu_info = cpuinfo.get_cpu_info() if cpu_info is not None: stats['cpu_name'] = cpu_info.get('brand', 'CPU') if 'hz_actual_raw' in cpu_info: stats['cpu_hz_current'] = cpu_info['hz_actual_raw'][0] if 'hz_advertised_raw' in cpu_info: stats['cpu_hz'] = cpu_info['hz_advertised_raw'][0] self.stats = stats return self.stats
Update quicklook stats using the input method.
def set_ipcsem_params(self, ftok=None, persistent=None):
    self._set('ftok', ftok)
    self._set('persistent-ipcsem', persistent, cast=bool)
    return self._section
Sets ipcsem lock engine params. :param str|unicode ftok: Set the ipcsem key via ftok() for avoiding duplicates. :param bool persistent: Do not remove ipcsem's on shutdown.
def is_broker_action_done(action, rid=None, unit=None): rdata = relation_get(rid, unit) or {} broker_rsp = rdata.get(get_broker_rsp_key()) if not broker_rsp: return False rsp = CephBrokerRsp(broker_rsp) unit_name = local_unit().partition('/')[2] key = "unit_{}_ceph_broker_action.{}".format(unit_name, action) kvstore = kv() val = kvstore.get(key=key) if val and val == rsp.request_id: return True return False
Check whether broker action has completed yet. @param action: name of action to be performed @returns True if action complete otherwise False
def get_proficiencies_for_objectives(self, objective_ids):
    collection = JSONClientValidated('learning',
                                     collection='Proficiency',
                                     runtime=self._runtime)
    result = collection.find(
        dict({'objectiveId': str(objective_ids)},
             **self._view_filter())).sort('_id', ASCENDING)
    return objects.ProficiencyList(result, runtime=self._runtime)
Gets a ``ProficiencyList`` relating to the given objectives. arg: objective_ids (osid.id.IdList): the objective ``Ids`` return: (osid.learning.ProficiencyList) - the returned ``Proficiency`` list raise: NullArgument - ``objective_ids`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
def get_pg_core(connection_string, *, cursor_factory=None, edit_connection=None): import psycopg2 as pq from psycopg2.extras import NamedTupleCursor def opener(): cn = pq.connect(connection_string) cn.cursor_factory = cursor_factory or NamedTupleCursor if edit_connection: edit_connection(cn) return cn return InjectedDataAccessCore( opener, default_connection_closer, ("%({0})s", "%s", "{0}::{1}"), empty_params=None, supports_timezones=True, supports_returning_syntax=True, get_autocommit=get_pg_autocommit, set_autocommit=set_pg_autocommit)
Creates a simple PostgreSQL core. Requires the psycopg2 library.
def getDiskFreeSpace( self, freeBytesAvailable, totalNumberOfBytes, totalNumberOfFreeBytes, dokanFileInfo, ): ret = self.operations('getDiskFreeSpace') ctypes.memmove( freeBytesAvailable, ctypes.byref(ctypes.c_longlong(ret['freeBytesAvailable'])), ctypes.sizeof(ctypes.c_longlong), ) ctypes.memmove( totalNumberOfBytes, ctypes.byref(ctypes.c_longlong(ret['totalNumberOfBytes'])), ctypes.sizeof(ctypes.c_longlong), ) ctypes.memmove( totalNumberOfFreeBytes, ctypes.byref(ctypes.c_longlong(ret['totalNumberOfFreeBytes'])), ctypes.sizeof(ctypes.c_longlong), ) return d1_onedrive.impl.drivers.dokan.const.DOKAN_SUCCESS
Get the amount of free space on this volume. :param freeBytesAvailable: pointer for free bytes available :type freeBytesAvailable: ctypes.c_void_p :param totalNumberOfBytes: pointer for total number of bytes :type totalNumberOfBytes: ctypes.c_void_p :param totalNumberOfFreeBytes: pointer for total number of free bytes :type totalNumberOfFreeBytes: ctypes.c_void_p :param dokanFileInfo: used by Dokan :type dokanFileInfo: PDOKAN_FILE_INFO :return: error code :rtype: ctypes.c_int
def async_session_handler(self, signal: str) -> None: if signal == 'data': self.async_event_handler(self.websocket.data) elif signal == 'state': if self.async_connection_status_callback: self.async_connection_status_callback( self.websocket.state == 'running')
Signalling from websocket. data - new data available for processing. state - network state has changed.
def countExtn(fimg, extname='SCI'): closefits = False if isinstance(fimg, string_types): fimg = fits.open(fimg) closefits = True n = 0 for e in fimg: if 'extname' in e.header and e.header['extname'] == extname: n += 1 if closefits: fimg.close() return n
Return the number of 'extname' extensions, defaulting to counting the number of SCI extensions.
def frame2string(frame):
    lineno = frame.f_lineno
    co = frame.f_code
    filename = co.co_filename
    name = co.co_name
    s = '\tFile "{0}", line {1}, in {2}'.format(filename, lineno, name)
    line = linecache.getline(filename, lineno, frame.f_globals).lstrip()
    return s + '\n\t\t' + line
Return info about frame. Keyword arg: frame Return string in format: File {file name}, line {line number}, in {name of parent of code object} {newline} Line from file at line number
def __init_url(self): portals_self_url = "{}/portals/self".format(self._url) params = { "f" :"json" } if not self._securityHandler is None: params['token'] = self._securityHandler.token res = self._get(url=portals_self_url, param_dict=params, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port) if "helperServices" in res: helper_services = res.get("helperServices") if "hydrology" in helper_services: analysis_service = helper_services.get("elevation") if "url" in analysis_service: self._analysis_url = analysis_service.get("url") self._gpService = GPService(url=self._analysis_url, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port, initialize=False)
loads the information into the class
def get_ignored_lines(self): ignored_lines = set() for line_number, line in enumerate(self.content.split('\n'), 1): if ( WHITELIST_REGEX['yaml'].search(line) or ( self.exclude_lines_regex and self.exclude_lines_regex.search(line) ) ): ignored_lines.add(line_number) return ignored_lines
Return a set of integers that refer to line numbers that were whitelisted by the user and should be ignored. We need to parse the file separately from PyYAML parsing because the parser drops the comments (at least up to version 3.13): https://github.com/yaml/pyyaml/blob/a2d481b8dbd2b352cb001f07091ccf669227290f/lib3/yaml/scanner.py#L749 :return: set
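A stripped-down sketch of the same line-scanning idea, using a made-up `# pragma: whitelist` marker in place of the project's real regexes (the marker and sample text are assumptions for illustration):

import re

WHITELIST_RE = re.compile(r'#\s*pragma:\s*whitelist')   # assumed marker, for illustration

def ignored_line_numbers(content):
    ignored = set()
    for line_number, line in enumerate(content.split('\n'), 1):
        if WHITELIST_RE.search(line):
            ignored.add(line_number)
    return ignored

sample = "password: hunter2\ntoken: abc  # pragma: whitelist\n"
print(ignored_line_numbers(sample))  # {2}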