code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
iv = derive_key(hashlib.sha1, PURPOSE_IV_MATERIAL, password, salt, iteration_count, 16) key = derive_key(hashlib.sha1, PURPOSE_KEY_MATERIAL, password, salt, iteration_count, 256//8) encrypted_data = bytearray(encrypted_data) encrypted_data_len = len(encrypted_data) if encrypted_data_len % 16...
def decrypt_PBEWithSHAAndTwofishCBC(encrypted_data, password, salt, iteration_count)
Decrypts PBEWithSHAAndTwofishCBC, assuming PKCS#12-generated PBE parameters. (Not explicitly defined as an algorithm in RFC 7292, but defined here nevertheless because of the assumption of PKCS#12 parameters).
2.798462
2.975789
0.94041
iv = derive_key(hashlib.sha1, PURPOSE_IV_MATERIAL, password, salt, iteration_count, 16) key = derive_key(hashlib.sha1, PURPOSE_KEY_MATERIAL, password, salt, iteration_count, 256//8) plaintext_data = add_pkcs7_padding(plaintext_data, 16) plaintext_data = bytearray(plaintext_data) plaintext_le...
def encrypt_PBEWithSHAAndTwofishCBC(plaintext_data, password, salt, iteration_count)
Encrypts a value with PBEWithSHAAndTwofishCBC, assuming PKCS#12-generated PBE parameters. (Not explicitly defined as an algorithm in RFC 7292, but defined here nevertheless because of the assumption of PKCS#12 parameters).
2.506671
2.705597
0.926476
password_bytes = password_str.encode('utf-16be') # Java chars are UTF-16BE code units iv = os.urandom(20) key = bytearray(key) xoring = zip(key, _jks_keystream(iv, password_bytes)) data = bytearray([d^k for d,k in xoring]) check = hashlib.sha1(bytes(password_bytes + key)).digest() ret...
def jks_pkey_encrypt(key, password_str)
Encrypts the private key with password protection algorithm used by JKS keystores.
5.438236
5.311666
1.023829
password_bytes = password_str.encode('utf-16be') # Java chars are UTF-16BE code units data = bytearray(data) iv, data, check = data[:20], data[20:-20], data[-20:] xoring = zip(data, _jks_keystream(iv, password_bytes)) key = bytearray([d^k for d,k in xoring]) if hashlib.sha1(bytes(password...
def jks_pkey_decrypt(data, password_str)
Decrypts the private key password protection algorithm used by JKS keystores. The JDK sources state that 'the password is expected to be in printable ASCII', though this does not appear to be enforced; the password is converted into bytes simply by taking each individual Java char and appending its raw 2-byte r...
5.800383
5.600222
1.035742
cur = iv while 1: xhash = hashlib.sha1(bytes(password + cur)) # hashlib.sha1 in python 2.6 does not accept a bytearray argument cur = bytearray(xhash.digest()) # make sure we iterate over ints in both Py2 and Py3 for byte in cur: yield byte
def _jks_keystream(iv, password)
Helper keystream generator for _jks_pkey_decrypt
8.740587
8.335293
1.048624
key, iv = _jce_pbe_derive_key_and_iv(password, salt, iteration_count) from Cryptodome.Cipher import DES3 des3 = DES3.new(key, DES3.MODE_CBC, IV=iv) padded = des3.decrypt(data) result = strip_pkcs5_padding(padded) return result
def jce_pbe_decrypt(data, password, salt, iteration_count)
Decrypts Sun's custom PBEWithMD5AndTripleDES password-based encryption scheme. It is based on password-based encryption as defined by the PKCS #5 standard, except that it uses triple DES instead of DES. Here's how this algorithm works: 1. Create random salt and split it in two halves. If the two halves ar...
2.543794
3.013757
0.844061
salt = bytearray(salt_half) salt[2] = salt[1] salt[1] = salt[0] salt[0] = salt[3] return bytes(salt)
def _jce_invert_salt_half(salt_half)
JCE's proprietary PBEWithMD5AndTripleDES algorithm as described in the OpenJDK sources calls for inverting the first salt half if the two halves are equal. However, there appears to be a bug in the original JCE implementation of com.sun.crypto.provider.PBECipherCore causing it to perform a different operation: ...
2.832021
3.25985
0.868758
bitlist = list(bitstr) bits_missing = (8 - len(bitlist) % 8) % 8 bitlist = [0]*bits_missing + bitlist # pad with 0 bits to a multiple of 8 result = bytearray() for i in range(0, len(bitlist), 8): byte = 0 for j in range(8): byte = (byte << 1) | bitlist[i+j] r...
def bitstring_to_bytes(bitstr)
Converts a pyasn1 univ.BitString instance to byte sequence of type 'bytes'. The bit string is interpreted big-endian and is left-padded with 0 bits to form a multiple of 8.
2.169989
2.081795
1.042364
if len(m) < block_size or len(m) % block_size != 0: raise BadPaddingException("Unable to strip padding: invalid message length") m = bytearray(m) # py2/3 compatibility: always returns individual indexed elements as ints last_byte = m[-1] # the <last_byte> bytes of m must all have value <la...
def strip_pkcs7_padding(m, block_size)
Same as PKCS#5 padding, except generalized to block sizes other than 8.
4.544801
4.520961
1.005273
with open(filename, 'rb') as file: input_bytes = file.read() ret = cls.loads(input_bytes, store_password, try_decrypt_keys=try_decrypt_keys) return ret
def load(cls, filename, store_password, try_decrypt_keys=True)
Convenience wrapper function; reads the contents of the given file and passes it through to :func:`loads`. See :func:`loads`.
2.828402
2.675895
1.056993
with open(filename, 'wb') as file: keystore_bytes = self.saves(store_password) file.write(keystore_bytes)
def save(self, filename, store_password)
Convenience wrapper function; calls the :func:`saves` and saves the content to a file.
4.42176
3.55075
1.245303
size = b2.unpack_from(data, pos)[0] pos += 2 try: return data[pos:pos+size].decode('utf-8'), pos+size except (UnicodeEncodeError, UnicodeDecodeError) as e: raise BadKeystoreFormatException(("Failed to read %s, contains bad UTF-8 data: %s" % (kind, str(e))...
def _read_utf(cls, data, pos, kind=None)
:param kind: Optional; a human-friendly identifier for the kind of UTF-8 data we're loading (e.g. is it a keystore alias? an algorithm identifier? something else?). Used to construct more informative exception messages when a decoding error occurs.
4.243786
3.890789
1.090726
return dict([(a, e) for a, e in self.entries.items() if isinstance(e, BksSealedKeyEntry)])
def sealed_keys(self)
A subset of the :attr:`entries` dictionary, filtered down to only those entries of type :class:`BksSealedKeyEntry`.
10.217973
3.942284
2.591892
return dict([(a, e) for a, e in self.entries.items() if isinstance(e, BksKeyEntry)])
def plain_keys(self)
A subset of the :attr:`entries` dictionary, filtered down to only those entries of type :class:`BksKeyEntry`.
10.875808
4.151372
2.61981
try: pos = 0 version = b4.unpack_from(data, pos)[0]; pos += 4 if version not in [1,2]: raise UnsupportedKeystoreVersionException("Unsupported BKS keystore version; only V1 and V2 supported, found v"+repr(version)) salt, pos = cls._read_da...
def loads(cls, data, store_password, try_decrypt_keys=True)
See :meth:`jks.jks.KeyStore.loads`. :param bytes data: Byte string representation of the keystore to be loaded. :param str password: Keystore password string :param bool try_decrypt_keys: Whether to automatically try to decrypt any encountered key entries using the same password ...
3.266485
3.074871
1.062316
key_type = b1.unpack_from(data, pos)[0]; pos += 1 key_format, pos = BksKeyStore._read_utf(data, pos, kind="key format") key_algorithm, pos = BksKeyStore._read_utf(data, pos, kind="key algorithm") key_enc, pos = BksKeyStore._read_data(data, pos) entry = BksKeyEntry(key_t...
def _read_bks_key(cls, data, pos, store_type)
Given a data stream, attempt to parse a stored BKS key entry at the given position, and return it as a BksKeyEntry.
3.207921
2.90851
1.102943
# Uber keystores contain the same entry data as BKS keystores, except they wrap it differently: # BKS = BKS_store || HMAC-SHA1(BKS_store) # UBER = PBEWithSHAAndTwofish-CBC(BKS_store || SHA1(BKS_store)) # # where BKS_store represents the entry format shared by both...
def loads(cls, data, store_password, try_decrypt_keys=True)
See :meth:`jks.jks.KeyStore.loads`. :param bytes data: Byte string representation of the keystore to be loaded. :param str password: Keystore password string :param bool try_decrypt_keys: Whether to automatically try to decrypt any encountered key entries using the same password ...
4.528519
4.357716
1.039196
num_columns = int(ceil(sqrt(n))) num_rows = int(ceil(n / float(num_columns))) return (num_columns, num_rows)
def calc_columns_rows(n)
Calculate the number of columns and rows required to divide an image into ``n`` parts. Return a tuple of integers in the format (num_columns, num_rows)
2.188852
2.170128
1.008628
# TODO: Refactor calculating layout to avoid repetition. columns, rows = calc_columns_rows(len(tiles)) tile_size = tiles[0].image.size return (tile_size[0] * columns, tile_size[1] * rows)
def get_combined_size(tiles)
Calculate combined size of tiles.
5.139082
4.598386
1.117584
TILE_LIMIT = 99 * 99 try: number_tiles = int(number_tiles) except: raise ValueError('number_tiles could not be cast to integer.') if number_tiles > TILE_LIMIT or number_tiles < 2: raise ValueError('Number of tiles must be between 2 and {} (you \ a...
def validate_image(image, number_tiles)
Basic sanity checks prior to performing a split.
3.636979
3.474274
1.046831
SPLIT_LIMIT = 99 try: col = int(col) row = int(row) except: raise ValueError('columns and rows values could not be cast to integer.') if col < 2: raise ValueError('Number of columns must be between 2 and {} (you \ asked for {}).'.format(SP...
def validate_image_col_row(image , col , row)
Basic checks for columns and rows values
3.477171
3.150446
1.103708
im = Image.open(filename) im_w, im_h = im.size columns = 0 rows = 0 if not number_tiles is None: validate_image(im, number_tiles) columns, rows = calc_columns_rows(number_tiles) extras = (columns * rows) - number_tiles else: validate_image_col_row(im, col, r...
def slice(filename, number_tiles=None, col=None, row=None, save=True)
Split an image into a specified number of tiles. Args: filename (str): The filename of the image to split. number_tiles (int): The number of tiles required. Kwargs: save (bool): Whether or not to save tiles to disk. Returns: Tuple of :class:`Tile` instances.
2.48473
2.516388
0.987419
# Causes problems in CLI script. # if not os.path.exists(directory): # os.makedirs(directory) for tile in tiles: tile.save(filename=tile.generate_filename(prefix=prefix, directory=directory, ...
def save_tiles(tiles, prefix='', directory=os.getcwd(), format='png')
Write image files to disk. Create specified folder(s) if they don't exist. Return list of :class:`Tile` instance. Args: tiles (list): List, tuple or set of :class:`Tile` objects to save. prefix (str): Filename prefix of saved tiles. Kwargs: directory (str): Directory to save til...
4.257836
4.692545
0.907362
filename = prefix + '_{col:02d}_{row:02d}.{ext}'.format( col=self.column, row=self.row, ext=format.lower().replace('jpeg', 'jpg')) if not path: return filename return os.path.join(directory, filename)
def generate_filename(self, directory=os.getcwd(), prefix='tile', format='png', path=True)
Construct and return a filename for this tile.
2.603615
2.464796
1.056321
tiles = [] for filename in filenames: row, column = os.path.splitext(filename)[0][-5:].split('_') tiles.append((int(row), int(column))) rows = [pos[0] for pos in tiles]; columns = [pos[1] for pos in tiles] num_rows = max(rows); num_columns = max(columns) return (num_columns, num...
def get_columns_rows(filenames)
Derive number of columns and rows from filenames.
2.716922
2.576337
1.054568
initial_state.inspect.b('mem_read', when=angr.BP_AFTER, action=_read_consolidate) initial_state.inspect.b('reg_read', when=angr.BP_AFTER, action=_read_consolidate)
def consolidate_reverse_exprs(initial_state)
Tries to simplify the Reverse(Extract(Reverse())) pattern in expressions. NOTE: Experimental! Maybe not working correctly, use it with care!
5.785494
6.02926
0.95957
size = expr.size() umin = umax = smin = smax = None if not sat_zero(se, expr): try: umin = se.min(expr, extra_constraints=[claripy.Extract(size-1,size-1,expr) == 0]) umax = se.max(expr, extra_constraints=[claripy.Extract(size-1,size-1,expr) == 0]) return (um...
def get_signed_range(se, expr)
Calculate the range of the expression with signed boundaries
1.959705
1.961081
0.999298
status = True if not os.path.isfile(fn): status = False else: try: open(fn) except IOError: status = False return status
def fn_check_full(fn)
Check for file existence Avoids race condition, but slower than os.path.exists. Parameters ---------- fn : str Input filename string. Returns ------- status True if file exists, False otherwise.
3.858099
4.34154
0.888648
ds = None if fn_check(fn): ds = gdal.Open(fn, gdal.GA_ReadOnly) else: print("Unable to find %s" % fn) return ds
def fn_getds(fn)
Wrapper around gdal.Open()
3.055671
2.92826
1.043511
#Add check for filename existence ds = fn_getds(fn) out = ds_getma(ds, bnum=bnum) if return_ds: out = (out, ds) return out
def fn_getma(fn, bnum=1, return_ds=False)
Get masked array from input filename Parameters ---------- fn : str Input filename string bnum : int, optional Band number Returns ------- np.ma.array Masked array containing raster values
4.275695
4.857206
0.880279
b_ndv = get_ndv_b(b) #bma = np.ma.masked_equal(b.ReadAsArray(), b_ndv) #This is more appropriate for float, handles precision issues bma = np.ma.masked_values(b.ReadAsArray(), b_ndv) return bma
def b_getma(b)
Get masked array from input GDAL Band Parameters ---------- b : gdal.Band Input GDAL Band Returns ------- np.ma.array Masked array containing raster values
5.499128
4.761898
1.154818
ns = src_ds.RasterXSize nl = src_ds.RasterYSize maxdim = float(maxdim) if scale is None: scale_ns = ns/maxdim scale_nl = nl/maxdim scale = max(scale_ns, scale_nl) #Need to check to make sure scale is positive real if scale > 1: ns = int(round(ns/scale)) ...
def get_sub_dim(src_ds, scale=None, maxdim=1024)
Compute dimensions of subsampled dataset Parameters ---------- ds : gdal.Dataset Input GDAL Datset scale : int, optional Scaling factor maxdim : int, optional Maximum dimension along either axis, in pixels Returns ------- ns Numper of samples in s...
2.867009
2.925652
0.979956
dtype = src_ds.GetRasterBand(1).DataType src_ds_sub = gdal.GetDriverByName('MEM').Create('', ns, nl, 1, dtype) gt = np.array(src_ds.GetGeoTransform()) gt[[1,5]] = gt[[1,5]]*scale src_ds_sub.SetGeoTransform(list(gt)) src_ds_sub.SetProjection(src_ds.GetProjection()) ...
def ds_getma_sub(src_ds, bnum=1, scale=None, maxdim=1024., return_ds=False): #print src_ds.GetFileList()[0] b = src_ds.GetRasterBand(bnum) b_ndv = get_ndv_b(b) ns, nl, scale = get_sub_dim(src_ds, scale, maxdim) #The buf_size parameters determine the final array dimensions b_array = b.Re...
Load a subsampled array, rather than full resolution This is useful when working with large rasters Uses buf_xsize and buf_ysize options from GDAL ReadAsArray method. Parameters ---------- ds : gdal.Dataset Input GDAL Datset bnum : int, optional Band number scale : int, o...
2.241666
2.512696
0.892136
out_vrt = os.path.splitext(out_csv)[0]+'.vrt' out_csv = os.path.split(out_csv)[-1] f = open(out_vrt, 'w') f.write('<OGRVRTDataSource>\n') f.write(' <OGRVRTLayer name="%s">\n' % os.path.splitext(out_csv)[0]) f.write(' <SrcDataSource>%s</SrcDataSource>\n' % out_csv) f.write(' ...
def writevrt(out_csv,srs='EPSG:4326',x='field_1',y='field_2')
Write out a vrt to accompany a csv of points
1.37793
1.36945
1.006192
dt_dict = gdal_array.codes if isinstance(d, (np.ndarray, np.generic)): d = d.dtype #This creates dtype from another built-in type #d = np.dtype(d) if isinstance(d, np.dtype): if d.name == 'int8': gdal_dt = 1 elif d.name == 'bool': #Write o...
def np2gdal_dtype(d)
Get GDAL RasterBand datatype that corresponds with NumPy datatype Input should be numpy array or numpy dtype
4.049691
3.71173
1.091052
dt_dict = gdal_array.codes if isinstance(b, str): b = gdal.Open(b) if isinstance(b, gdal.Dataset): b = b.GetRasterBand(1) if isinstance(b, gdal.Band): b = b.DataType if isinstance(b, int): np_dtype = dt_dict[b] else: np_dtype = None print("Inp...
def gdal2np_dtype(b)
Get NumPy datatype that corresponds with GDAL RasterBand datatype Input can be filename, GDAL Dataset, GDAL RasterBand, or GDAL integer dtype
2.792116
2.318365
1.204347
b_ndv = b.GetNoDataValue() if b_ndv is None: #Check ul pixel for ndv ns = b.XSize nl = b.YSize ul = float(b.ReadAsArray(0, 0, 1, 1)) #ur = float(b.ReadAsArray(ns-1, 0, 1, 1)) lr = float(b.ReadAsArray(ns-1, nl-1, 1, 1)) #ll = float(b.ReadAsArray(0, nl...
def get_ndv_b(b)
Get NoData value for GDAL band. If NoDataValue is not set in the band, extract upper left and lower right pixel values. Otherwise assume NoDataValue is 0. Parameters ---------- b : GDALRasterBand object This is the input band. Returns ------- b_ndv : float NoD...
2.843138
2.579488
1.10221
if logical: from multiprocessing import cpu_count ncpu=cpu_count() else: import psutil ncpu=psutil.cpu_count(logical=False) return ncpu
def cpu_count(logical=True)
Return system CPU count
2.532453
2.394395
1.057659
fn = os.path.split(url)[-1] if outdir is not None: fn = os.path.join(outdir, fn) if not os.path.exists(fn): #Find appropriate urlretrieve for Python 2 and 3 try: from urllib.request import urlretrieve except ImportError: from urllib import urlretr...
def getfile(url, outdir=None)
Function to fetch files using urllib Works with ftp
3.10568
3.31171
0.937788
import requests print("Retrieving: %s" % url) fn = os.path.split(url)[-1] if outdir is not None: fn = os.path.join(outdir, fn) if auth is not None: r = requests.get(url, stream=True, auth=auth) else: r = requests.get(url, stream=True) chunk_size = 1000000 wit...
def getfile2(url, auth=None, outdir=None)
Function to fetch files using requests Works with https authentication
1.748382
1.854755
0.942648
import getpass from requests.auth import HTTPDigestAuth #This binds raw_input to input for Python 2 input_func = input try: input_func = raw_input except NameError: pass uname = input_func("MODSCAG Username:") pw = getpass.getpass("MODSCAG Password:") auth = HTTP...
def get_auth()
Get authorization token for https
7.766786
7.535324
1.030717
import csv #Check first line for header with open(fn, 'r') as f: reader = csv.DictReader(f) hdr = reader.fieldnames #Assume there is a header on first line, check skiprows = 1 if np.all(f.isdigit() for f in hdr): hdr = None skiprows = 0 #Check header f...
def readcsv(fn)
Wrapper to read arbitrary csv, check for header Needs some work to be more robust, quickly added for demcoreg sampling
6.372752
6.050655
1.053233
print('Excluding values outside of range: {0:f} to {1:f}'.format(*rangelim)) out = np.ma.masked_outside(dem, *rangelim) out.set_fill_value(dem.fill_value) return out
def range_fltr(dem, rangelim)
Range filter (helper function)
2.982636
2.968512
1.004758
out = range_fltr(np.ma.abs(dem), *rangelim) #Apply mask to original input out = np.ma.array(dem, mask=np.ma.getmaskarray(out)) out.set_fill_value(dem.fill_value) return out
def absrange_fltr(dem, rangelim)
Absolute range filter
3.932856
3.817033
1.030344
rangelim = malib.calcperc(dem, perc) print('Excluding values outside of percentile range: {0:0.2f} to {1:0.2f}'.format(*perc)) out = range_fltr(dem, rangelim) return out
def perc_fltr(dem, perc=(1.0, 99.0))
Percentile filter
5.725267
5.827129
0.982519
std = dem.std() u = dem.mean() print('Excluding values outside of range: {1:0.2f} +/- {0}*{2:0.2f}'.format(n, u, std)) rangelim = (u - n*std, u + n*std) out = range_fltr(dem, rangelim) return out
def sigma_fltr(dem, n=3)
sigma * factor filter Useful for outlier removal These are min/max percentile ranges for different sigma values: 1: 15.865, 84.135 2: 2.275, 97.725 3: 0.135, 99.865
5.300261
5.462986
0.970213
mad, med = malib.mad(dem, return_med=True) print('Excluding values outside of range: {1:0.3f} +/- {0}*{2:0.3f}'.format(n, med, mad)) rangelim = (med - n*mad, med + n*mad) out = range_fltr(dem, rangelim) return out
def mad_fltr(dem, n=3)
Median absolute deviation * factor filter Robust outlier removal
4.85589
5.330053
0.91104
#import astropy.nddata import astropy.convolution dem = malib.checkma(dem) #Generate 2D gaussian kernel for input sigma and size #Default size is 8*sigma in x and y directions #kernel = astropy.nddata.make_kernel([size, size], sigma, 'gaussian') #Size must be odd if size is not Non...
def gauss_fltr_astropy(dem, size=None, sigma=None, origmask=False, fill_interior=False)
Astropy gaussian filter properly handles convolution with NaN http://stackoverflow.com/questions/23832852/by-which-measures-should-i-set-the-size-of-my-gaussian-filter-in-matlab width1 = 3; sigma1 = (width1-1) / 6; Specify width for smallest feature of interest and determine sigma appropriately sigma...
3.440786
3.461982
0.993878
dem = malib.checkma(dem) levels = int(np.floor(np.log2(size))) #print levels dim = np.floor(np.array(dem.shape)/float(2**levels) + 1)*(2**levels) #print dem.shape #print dim #Can do something with np.pad here #np.pad(a_fp.filled(), 1, mode='constant', constant_values=(a_fp.fill_valu...
def gauss_fltr_pyramid(dem, size=None, full=False, origmask=False)
Pyaramidal downsampling approach for gaussian smoothing Avoids the need for large kernels, very fast Needs testing
2.932641
2.944227
0.996065
import cv2 dem = malib.checkma(dem) dem_cv = cv2.GaussianBlur(dem.filled(np.nan), (size, size), sigma) out = np.ma.fix_invalid(dem_cv) out.set_fill_value(dem.fill_value) return out
def gauss_fltr_opencv(dem, size=3, sigma=1)
OpenCV Gaussian filter Still propagates NaN values
3.765524
3.367332
1.118252
smooth = gauss_fltr_astropy(dem, size=size) smooth[~dem.mask] = dem[~dem.mask] if newmask is not None: smooth = np.ma.array(smooth, mask=newmask) return smooth
def gaussfill(dem, size=3, newmask=None)
Gaussian filter with filling
3.707384
3.668993
1.010464
print("Applying median filter with size %s" % fsize) from scipy.ndimage.filters import median_filter dem_filt_med = median_filter(dem.filled(np.nan), fsize) #Now mask all nans out = np.ma.fix_invalid(dem_filt_med, copy=False, fill_value=dem.fill_value) if origmask: out = np.ma.array...
def median_fltr(dem, fsize=7, origmask=False)
Scipy.ndimage median filter Does not properly handle NaN
2.560878
2.656474
0.964014
import cv2 dem = malib.checkma(dem) if size > 5: print("Need to implement iteration") n = 0 out = dem while n <= iterations: dem_cv = cv2.medianBlur(out.astype(np.float32).filled(np.nan), size) out = np.ma.fix_invalid(dem_cv) out.set_fill_value(dem.fill_value...
def median_fltr_opencv(dem, size=3, iterations=1)
OpenCV median filter
4.550635
4.288352
1.061162
r = size/2 c = (r,r) y,x = np.ogrid[-c[0]:size-c[0], -c[1]:size-c[1]] mask = ~(x*x + y*y <= r*r) return mask
def circular_mask(size)
Create a circular mask for an array Useful when sampling rasters for a laser shot
2.739406
2.471936
1.108203
print("Applying rolling filter: %s with size %s" % (f.__name__, size)) dem = malib.checkma(dem) #Convert to float32 so we can fill with nan dem = dem.astype(np.float32) newshp = (dem.size, size*size) #Force a step size of 1 t = malib.sliding_window_padded(dem.filled(np.nan), (size, size...
def rolling_fltr(dem, f=np.nanmedian, size=3, circular=True, origmask=False)
General rolling filter (default operator is median filter) Can input any function f Efficient for smaller arrays, correclty handles NaN, fills gaps
3.527629
3.659809
0.963883
#Note, ndimage doesn't properly handle ma - convert to nan dem = malib.checkma(dem) dem = dem.astype(np.float64) #Mask islands if erode > 0: print("Eroding islands smaller than %s pixels" % (erode * 2)) dem = malib.mask_islands(dem, iterations=erode) print("Applying median ...
def median_fltr_skimage(dem, radius=3, erode=1, origmask=False)
Older skimage.filter.median_filter This smooths, removes noise and fills in nodata areas with median of valid pixels! Effectively an inpainting routine
4.843401
4.824182
1.003984
print("Applying uniform filter with size %s" % fsize) #Note, ndimage doesn't properly handle ma - convert to nan from scipy.ndimage.filters import unifiform_filter dem_filt_med = uniform_filter(dem.filled(np.nan), fsize) #Now mask all nans out = np.ma.fix_invalid(dem_filt_med, copy=False, f...
def uniform_fltr(dem, fsize=7)
Uniform (mean) filter Note: suffers from significant ringing
5.7554
5.923364
0.971644
try: open(refdem_fn) except IOError: sys.exit('Unable to open reference DEM: %s' % refdem_fn) from pygeotools.lib import warplib dem_ds, refdem_ds = warplib.memwarp_multi_fn([dem_fn, refdem_fn], res='first', extent='first', t_srs='first') dem = iolib.ds_getma(dem_ds) refdem...
def dz_fltr(dem_fn, refdem_fn, perc=None, rangelim=(0,30), smooth=False)
Absolute elevation difference range filter using values from a source raster file and a reference raster file
2.878839
2.869643
1.003204
if smooth: refdem = gauss_fltr_astropy(refdem) dem = gauss_fltr_astropy(dem) dz = refdem - dem #This is True for invalid values in DEM, and should be masked demmask = np.ma.getmaskarray(dem) if perc: dz_perc = malib.calcperc(dz, perc) print("Applying dz percen...
def dz_fltr_ma(dem, refdem, perc=None, rangelim=(0,30), smooth=False)
Absolute elevation difference range filter using values from a source array and a reference array
2.91877
2.942251
0.992019
import scipy.ndimage as ndimage print('Eroding pixels near nodata: %i iterations' % iterations) mask = np.ma.getmaskarray(dem) mask_dilate = ndimage.morphology.binary_dilation(mask, iterations=iterations) out = np.ma.array(dem, mask=mask_dilate) return out
def erode_edge(dem, iterations=1)
Erode pixels near nodata
3.670286
2.882302
1.273387
import scipy.signal import matplotlib.pyplot as plt #dt is 300 s, 5 min dt_diff = np.diff(dt_list) dt_diff = np.array([dt.total_seconds() for dt in dt_diff]) dt = malib.fast_median(dt_diff) #f is 0.00333 Hz #288 samples/day fs = 1./dt nyq = fs/2. if False: #psd,...
def butter(dt_list, val, lowpass=1.0)
This is framework for a butterworth bandpass for 1D data Needs to be cleaned up and generalized
2.485751
2.470677
1.006101
#Fill ndv with random data bf = malib.randomfill(bma) import scipy.fftpack f = scipy.fftpack.fft2(bf) ff = scipy.fftpack.fftshift(f) #Ben suggested a Hahn filter here, remove the low frequency, high amplitude information #Then do a second fft? #np.log(np.abs(ff)) #perc...
def freq_filt(bma)
This is a framework for 2D FFT filtering. It has not be tested or finished - might be a dead end See separate utility freq_analysis.py
6.916913
6.875042
1.00609
from copy import deepcopy from pygeotools.lib import filtlib print("Copying original DEMStack") s = deepcopy(s_orig) s.stack_fn = os.path.splitext(s_orig.stack_fn)[0]+'_smooth%ipx.npz' % size #Loop through each array and smooth print("Smoothing all arrays in stack with %i px gaussian f...
def stack_smooth(s_orig, size=7, save=False)
Run Gaussian smoothing filter on exising stack object
4.213803
4.022777
1.047486
#Should check for valid extent #This is not memory efficient, but is much simpler #To be safe, if we are saving out, create a copy to avoid overwriting if copy or save: from copy import deepcopy print("Copying original DEMStack") s = deepcopy(s_orig) else: #Want...
def stack_clip(s_orig, extent, out_stack_fn=None, copy=True, save=False)
Create a new stack object with limited extent from an exising stack object
4.115426
4.125713
0.997507
#This must be a numpy boolean array idx = np.array(idx) if np.any(idx): #This is not memory efficient, but is much simpler #To be safe, if we are saving out, create a copy to avoid overwriting if copy or save: from copy import deepcopy print("Copying orig...
def get_stack_subset(s_orig, idx, out_stack_fn=None, copy=True, save=False)
Create a new stack object as a subset of an exising stack object
3.526267
3.539918
0.996144
from pygeotools.lib import geolib from copy import deepcopy #Assumes input stacks have identical extent, resolution, and projection if s1.ma_stack.shape[1:3] != s2.ma_stack.shape[1:3]: print(s1.ma_stack.shape) print(s2.ma_stack.shape) sys.exit('Input stacks must have identic...
def stack_merge(s1, s2, out_stack_fn=None, sort=True, save=False)
Merge two stack objects
2.862119
2.832797
1.010351
a = checkma(a) #For data that have already been normalized, #This provides a proper normal distribution with mean=0 and std=1 #a = (a - a.mean()) / a.std() #noise = a.mask * (np.random.randn(*a.shape)) noise = a.mask * np.random.normal(a.mean(), a.std(), a.shape) #Add the noise b = ...
def randomfill(a)
Fill masked areas with random noise This is needed for any fft-based operations
5.143744
4.931283
1.043084
a = checkma(a) ndv = a.fill_value #Note: The following fails for arrays that are not float (np.nan is float) b = f_a(a.filled(np.nan), *args, **kwargs) #the fix_invalid fill_value parameter doesn't seem to work out = np.ma.fix_invalid(b, copy=False) out.set_fill_value(ndv) return ...
def nanfill(a, f_a, *args, **kwargs)
Fill masked areas with np.nan Wrapper for functions that can't handle ma (e.g. scipy.ndimage) This will force filters to ignore nan, but causes adjacent pixels to be set to nan as well: http://projects.scipy.org/scipy/ticket/1155
5.881074
5.864095
1.002895
a = checkma(a) #return scoreatpercentile(a.compressed(), 50) if a.count() > 0: out = np.percentile(a.compressed(), 50) else: out = np.ma.masked return out
def fast_median(a)
Fast median operation for masked array using 50th-percentile
4.558294
3.793472
1.201615
a = checkma(a) #return np.ma.median(np.fabs(a - np.ma.median(a))) / c if a.count() > 0: if axis is None: med = fast_median(a) out = fast_median(np.fabs(a - med)) * c else: med = np.ma.median(a, axis=axis) #This is necessary for broadcastin...
def mad(a, axis=None, c=1.4826, return_med=False)
Compute normalized median absolute difference Can also return median array, as this can be expensive, and often we want both med and nmad Note: 1.4826 = 1/0.6745
2.684329
2.736522
0.980927
b = checkma(b) if b.count() > 0: #low = scoreatpercentile(b.compressed(), perc[0]) #high = scoreatpercentile(b.compressed(), perc[1]) low = np.percentile(b.compressed(), perc[0]) high = np.percentile(b.compressed(), perc[1]) else: low = 0 high = 0 #l...
def calcperc(b, perc=(0.1,99.9))
Calculate values at specified percentiles
2.502589
2.380762
1.051172
clim = np.max(np.abs(calcperc(b, perc))) #clim = (-clim, clim) return -clim, clim
def calcperc_sym(b, perc=(0.1,99.9))
Get symmetrical percentile values Useful for determining clim centered on 0 for difference maps
6.909949
5.32976
1.296484
b = checkma(b) low, high = calcperc(b, perc) return low, high, high - low
def iqr(b, perc=(25, 75))
Inter-quartile range
9.264363
8.208624
1.128613
from scipy.stats.mstats import mode a = checkma(a) thresh = 4E6 if full or a.count() < thresh: q = (iqr(a)) p16, p84, spread = robust_spread(a) #There has to be a better way to compute the mode for a ma #mstats.mode returns tuple of (array[mode], array[count]) ...
def get_stats(a, full=False)
Compute and print statistics for input array Needs to be cleaned up, return a stats object
3.723031
3.6597
1.017305
d = {} a = checkma(a_in) d['count'] = a.count() thresh = 4E6 if not full and d['count'] > thresh: a = a.compressed() stride = int(np.around(a.size / thresh)) #a = np.ma.array(a[::stride]) a = a[::stride] d['min'] = a.min() d['max'] = a.max() d['ptp'] ...
def get_stats_dict(a_in, full=True)
Compute and print statistics for input array
3.184406
3.154944
1.009338
import matplotlib.pyplot as plt import imview.imviewer as imview b = checkma(b) #if hasattr(kwargs,'imshow_kwargs'): # kwargs['imshow_kwargs']['interpolation'] = 'bicubic' #else: # kwargs['imshow_kwargs'] = {'interpolation': 'bicubic'} #bma_fig(fig, bma, cmap='gist_rainbow_r'...
def iv(b, **kwargs)
Quick access to imview for interactive sessions
5.003089
4.519887
1.106906
from numpy.lib.stride_tricks import as_strided as ast # simple shape and strides computations may seem at first strange # unless one is able to recognize the 'tuple additions' involved ;-) shape= (A.shape[0]/ block[0], A.shape[1]/ block[1])+ block strides= (block[0]* A.strides[0], block[1]* A.s...
def block_view(A, block=(3, 3))
Provide a 2D block view to 2D array. No error checking made. Therefore meaningful (as implemented) only for blocks strictly compatible with the shape of A.
6.044739
5.970509
1.012433
from numpy.lib.stride_tricks import as_strided as ast ''' Return a sliding window over a in any number of dimensions Parameters: a - an n-dimensional numpy array ws - an int (a is 1D) or tuple (a is 2D or greater) representing the size of each dimension of the window ...
def sliding_window(a, ws, ss=None, flatten=True)
Return a sliding window over a in any number of dimensions Parameters: a - an n-dimensional numpy array ws - an int (a is 1D) or tuple (a is 2D or greater) representing the size of each dimension of the window ss - an int (a is 1D) or tuple (a is 2D or greater) representi...
3.49118
2.72681
1.280317
''' Normalize numpy array shapes so they're always expressed as a tuple, even for one-dimensional shapes. Parameters shape - an int, or a tuple of ints Returns a shape tuple ''' try: i = int(shape) return (i,) except TypeError: # shape ...
def norm_shape(shape)
Normalize numpy array shapes so they're always expressed as a tuple, even for one-dimensional shapes. Parameters shape - an int, or a tuple of ints Returns a shape tuple
5.135561
2.338919
2.195699
def localortho(lon, lat):
    """Create srs for local orthographic projection centered at lat, lon"""
    # Build a PROJ4 string for an orthographic projection centered here
    proj4_str = '+proj=ortho +lat_0=%0.7f +lon_0=%0.7f +datum=WGS84 +units=m +no_defs ' % (lat, lon)
    srs = osr.SpatialReference()
    srs.ImportFromProj4(proj4_str)
    return srs
2.19595
1.923092
1.141885
def geom2localortho(geom):
    """Convert existing geom to local orthographic projection

    Useful for local cartesian distance/area calculations
    """
    # Center the local projection on the geometry centroid (in WGS84)
    center_x, center_y = geom.Centroid().GetPoint_2D()
    lon, lat, z = cT_helper(center_x, center_y, 0, geom.GetSpatialReference(), wgs_srs)
    ortho_srs = localortho(lon, lat)
    # Duplicate before transforming so the input geom is left untouched
    ortho_geom = geom_dup(geom)
    geom_transform(ortho_geom, ortho_srs)
    return ortho_geom
7.265897
7.491981
0.969823
lat = np.array(lat) if np.any(lat > 0): m70_t70 = 1.9332279 #Hack to deal with pole lat[lat>=90.0] = 89.999999999 else: # for 71 deg, southern PS -- checked BS 5/2012 m70_t70 = 1.93903005 lat[lat<=-90.0] = -89.999999999 #for WGS84, a=6378137, 1/f...
def scale_ps(lat)
This function calculates the scaling factor for a polar stereographic projection (ie. SSM/I grid) to correct area calculations. The scaling factor is defined (from Snyder, 1982, Map Projections used by the U.S. Geological Survey) as: k = (mc/m)*(t/tc), where: m = cos(lat)/sqrt(1 - e2*sin(lat)^2) ...
4.241032
3.442945
1.231803
def lon360to180(lon):
    """Convert longitude from (0, 360) to (-180, 180)

    Expects a NumPy array (uses .astype). Values outside (0, 360) are
    wrapped first via wraplon.
    """
    if np.any(lon > 360.0) or np.any(lon < 0.0):
        print("Warning: lon outside expected range")
        lon = wraplon(lon)
    #lon[lon > 180.0] -= 360.0
    #lon180 = (lon+180) - np.floor((lon+180)/360)*360 - 180
    # NOTE: this expression relied on Python 2 integer division. Under
    # Python 3, '/' is true division, so e.g. 190 mapped to -190 instead
    # of -170. Floor division '//' restores the intended Py2 behavior.
    lon = lon - (lon.astype(int) // 180) * 360.0
    return lon
2.847189
2.803282
1.015663
def lon180to360(lon):
    """Convert longitude from (-180, 180) to (0, 360)"""
    out_of_range = np.any(lon > 180.0) or np.any(lon < -180.0)
    if out_of_range:
        print("Warning: lon outside expected range")
        lon = lon360to180(lon)
    #lon[lon < 0.0] += 360.0
    return (lon + 360.0) % 360.0
2.47852
2.363821
1.048523
def dd2dms(dd):
    """Convert decimal degrees to degrees, minutes, seconds"""
    negative = dd < 0
    # Work in total seconds, then peel off minutes and degrees
    total_sec = abs(dd) * 3600
    minutes, s = divmod(total_sec, 60)
    d, m = divmod(minutes, 60)
    if negative:
        d = -d
    return d, m, s
2.301019
2.266172
1.015377
def dms2dd(d,m,s):
    """Convert degrees, minutes, seconds to decimal degrees"""
    # Sign is carried by the degrees component only
    sign = -1 if d < 0 else 1
    magnitude = int(abs(d)) + float(m) / 60 + float(s) / 3600
    return sign * magnitude
2.074818
2.105901
0.98524
def dd2dm(dd):
    """Convert decimal to degrees, decimal minutes

    Returns (degrees, decimal minutes, seconds); the seconds component is
    folded into the minutes, and also passed through for interface
    compatibility with the original return shape.
    """
    d,m,s = dd2dms(dd)
    # Fold seconds into decimal minutes: 1 minute = 60 seconds.
    # The original divided by 3600 (seconds per degree), which understated
    # the fractional minutes by a factor of 60.
    m = m + float(s)/60
    return d,m,s
4.008577
3.880249
1.033072
mX = np.asarray(mX) mY = np.asarray(mY) if geoTransform[2] + geoTransform[4] == 0: pX = ((mX - geoTransform[0]) / geoTransform[1]) - 0.5 pY = ((mY - geoTransform[3]) / geoTransform[5]) - 0.5 else: pX, pY = applyGeoTransform(mX, mY, invertGeoTransform(geoTransform)) #retu...
def mapToPixel(mX, mY, geoTransform)
Convert map coordinates to pixel coordinates based on geotransform Accepts float or NumPy arrays GDAL model used here - upper left corner of upper left pixel for mX, mY (and in GeoTransform)
2.26269
2.379038
0.951094
def pixelToMap(pX, pY, geoTransform):
    """Convert pixel coordinates to map coordinates based on geotransform

    Accepts float or NumPy arrays

    GDAL model used here - upper left corner of upper left pixel for mX, mY (and in GeoTransform)
    """
    # Shift to pixel centers WITHOUT mutating the caller's arrays:
    # np.asarray returns the input object itself when it is already a float
    # ndarray, so the original in-place '+= 0.5' modified pX/pY in the
    # calling scope as a side effect.
    pX = np.asarray(pX, dtype=float) + 0.5
    pY = np.asarray(pY, dtype=float) + 0.5
    mX, mY = applyGeoTransform(pX, pY, geoTransform)
    return mX, mY
2.039641
2.369252
0.86088
import scipy.stats as stats extent = ds_extent(ds) #[[xmin, xmax], [ymin, ymax]] range = [[extent[0], extent[2]], [extent[1], extent[3]]] if bins is None: bins = (ds.RasterXSize, ds.RasterYSize) if stat == 'max': stat = np.max elif stat == 'min': stat = np.min ...
def block_stats(x,y,z,ds,stat='median',bins=None)
Compute points on a regular grid (matching input GDAL Dataset) from scattered point data using specified statistic Wrapper for scipy.stats.binned_statistic_2d Note: this is very fast for mean, std, count, but bignificantly slower for median
2.577686
2.453766
1.050502
def block_stats_grid(x,y,z,ds,stat='median'):
    """Fill regular grid (matching input GDAL Dataset) from scattered point data using specified statistic"""
    mx, my, mz = block_stats(x,y,z,ds,stat)
    # Map coordinates -> pixel indices for the target dataset grid
    pX, pY = mapToPixel(mx, my, ds.GetGeoTransform())
    ndv = -9999.0
    out = np.full((ds.RasterYSize, ds.RasterXSize), ndv)
    out[pY.astype('int'), pX.astype('int')] = mz
    # Mask cells that received no data
    return np.ma.masked_equal(out, ndv)
2.977822
2.981023
0.998926
#These round down to int #dst_ns = int((extent[2] - extent[0])/res) #dst_nl = int((extent[3] - extent[1])/res) #This should pad by 1 pixel, but not if extent and res were calculated together to give whole int dst_ns = int((extent[2] - extent[0])/res + 0.99) dst_nl = int((extent[3] - extent[...
def mem_ds(res, extent, srs=None, dtype=gdal.GDT_Float32)
Create a new GDAL Dataset in memory Useful for various applications that require a Dataset
2.678838
2.785623
0.961665
src_ds = gdal.Open(src_fn, gdal.GA_ReadOnly) dst_ds = gdal.Open(dst_fn, gdal.GA_Update) dst_ds.SetProjection(src_ds.GetProjection()) if gt: src_gt = np.array(src_ds.GetGeoTransform()) src_dim = np.array([src_ds.RasterXSize, src_ds.RasterYSize]) dst_dim = np.array([dst_ds.Ras...
def copyproj(src_fn, dst_fn, gt=True)
Copy projection and geotransform from one raster file to another
2.076875
2.072164
1.002274
def geom_dup(geom):
    """Create duplicate geometry

    Needed to avoid segfault when passing geom around. See: http://trac.osgeo.org/gdal/wiki/PythonGotchas
    """
    # Round-trip through WKT to get an independent copy, then restore srs
    dup = ogr.CreateGeometryFromWkt(geom.ExportToWkt())
    dup.AssignSpatialReference(geom.GetSpatialReference())
    return dup
2.586447
2.101253
1.230907
def geom_transform(geom, t_srs):
    """Transform a geometry in place"""
    src_srs = geom.GetSpatialReference()
    # Nothing to do if already in the target srs
    if src_srs.IsSame(t_srs):
        return
    transform = osr.CoordinateTransformation(src_srs, t_srs)
    geom.Transform(transform)
    geom.AssignSpatialReference(t_srs)
2.034426
2.044029
0.995302
from pygeotools.lib import timelib ds = ogr.Open(shp_fn) lyr = ds.GetLayer() nfeat = lyr.GetFeatureCount() print('%i input features\n' % nfeat) if fields is None: fields = shp_fieldnames(lyr) d_list = [] for n,feat in enumerate(lyr): d = {} if geom: ...
def shp_dict(shp_fn, fields=None, geom=True)
Get a dictionary for all features in a shapefile Optionally, specify fields
3.227973
3.358415
0.961159
#Need to check t_srs s_srs = lyr.GetSpatialRef() cT = osr.CoordinateTransformation(s_srs, t_srs) #Do everything in memory drv = ogr.GetDriverByName('Memory') #Might want to save clipped, warped shp to disk? # create the output layer #drv = ogr.GetDriverByName('ESRI Shapefile') ...
def lyr_proj(lyr, t_srs, preserve_fields=True)
Reproject an OGR layer
2.430857
2.420385
1.004327
shp_ds = ogr.Open(shp_fn) lyr = shp_ds.GetLayer() #This returns xmin, ymin, xmax, ymax shp_extent = lyr_extent(lyr) shp_srs = lyr.GetSpatialRef() # dst_dt = gdal.GDT_Byte ndv = 0 if r_ds is not None: r_extent = ds_extent(r_ds) res = get_res(r_ds, square=True)[0] ...
def shp2array(shp_fn, r_ds=None, res=None, extent=None, t_srs=None)
Rasterize input shapefile to match existing raster Dataset (or specified res/extent/t_srs)
2.807976
2.76982
1.013776
from pygeotools.lib import iolib from pygeotools.lib import warplib r_ds = iolib.fn_getds(r_fn) r_srs = get_ds_srs(r_ds) r_extent = ds_extent(r_ds) r_extent_geom = bbox2geom(r_extent) #NOTE: want to add spatial filter here to avoid reprojeting global RGI polygons, for example ...
def raster_shpclip(r_fn, shp_fn, extent='raster', bbox=False, pad=None, invert=False, verbose=False)
Clip an input raster by input polygon shapefile for given extent
3.5508
3.554106
0.99907
ds = ogr.Open(shp_fn) lyr = ds.GetLayer() srs = lyr.GetSpatialRef() lyr.ResetReading() geom_list = [] for feat in lyr: geom = feat.GetGeometryRef() geom.AssignSpatialReference(srs) #Duplicate the geometry, or segfault #See: http://trac.osgeo.org/gdal/wiki/Pyt...
def shp2geom(shp_fn)
Extract geometries from input shapefile Need to handle multi-part geom: http://osgeo-org.1560.x6.nabble.com/Multipart-to-singlepart-td3746767.html
3.14789
3.079349
1.022258