code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
# extract dictionaries of coefficients specific to required # intensity measure type and for PGA C = self.COEFFS[imt] C_PGA = self.COEFFS[PGA()] # compute median pga on rock (vs30=1100), needed for site response # term calculation pga1100 = np.exp(self._...
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types)
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
3.264071
3.233761
1.009373
c1 = self.CONSTS['c1'] R = np.sqrt(dists.rrup ** 2 + self.CONSTS['c4'] ** 2) base_term = (C['a1'] + C['a8'] * ((8.5 - rup.mag) ** 2) + (C['a2'] + self.CONSTS['a3'] * (rup.mag - c1)) * np.log(R)) if rup.mag <= c1: ...
def _compute_base_term(self, C, rup, dists)
Compute and return base model term, that is the first term in equation 1, page 74. The calculation of this term is explained in paragraph 'Base Model', page 75.
2.846616
2.826943
1.006959
# ranges of rake values for each faulting mechanism are specified in # table 2, page 75 return (C['a12'] * float(rup.rake > 30 and rup.rake < 150) + C['a13'] * float(rup.rake > -120 and rup.rake < -60))
def _compute_faulting_style_term(self, C, rup)
Compute and return faulting style term, that is the sum of the second and third terms in equation 1, page 74.
5.056424
4.503536
1.122768
site_resp_term = np.zeros_like(sites.vs30) vs30_star, _ = self._compute_vs30_star_factor(imt, sites.vs30) vlin, c, n = C['VLIN'], self.CONSTS['c'], self.CONSTS['n'] a10, b = C['a10'], C['b'] idx = sites.vs30 < vlin arg = vs30_star[idx] / vlin site_resp_...
def _compute_site_response_term(self, C, imt, sites, pga1100)
Compute and return site response model term, that is the fifth term in equation 1, page 74.
2.952821
2.920726
1.010989
if rup.dip == 90.0: return np.zeros_like(dists.rx) else: idx = dists.rx > 0 Fhw = np.zeros_like(dists.rx) Fhw[idx] = 1 # equation 8, page 77 T1 = np.zeros_like(dists.rx) idx1 = (dists.rjb < 30.0) & (idx) ...
def _compute_hanging_wall_term(self, C, dists, rup)
Compute and return hanging wall model term, that is the sixth term in equation 1, page 74. The calculation of this term is explained in paragraph 'Hanging-Wall Model', page 77.
2.404232
2.356169
1.020399
if rup.ztor >= 10.0: return C['a16'] else: return C['a16'] * rup.ztor / 10.0
def _compute_top_of_rupture_depth_term(self, C, rup)
Compute and return top of rupture depth term, that is the seventh term in equation 1, page 74. The calculation of this term is explained in paragraph 'Depth-to-Top of Rupture Model', page 78.
4.835664
4.239858
1.140525
# equation 15, page 79 if rup.mag < 5.5: T6 = 1.0 elif rup.mag >= 5.5 and rup.mag <= 6.5: T6 = 0.5 * (6.5 - rup.mag) + 0.5 else: T6 = 0.5 # equation 14, page 79 large_distance_term = np.zeros_like(dists.rrup) idx = dis...
def _compute_large_distance_term(self, C, dists, rup)
Compute and return large distance model term, that is the 8-th term in equation 1, page 74. The calculation of this term is explained in paragraph 'Large Distance Model', page 78.
2.780558
2.621315
1.06075
a21 = self._compute_a21_factor(C, imt, z1pt0, vs30) a22 = self._compute_a22_factor(imt) median_z1pt0 = self._compute_median_z1pt0(vs30) soil_depth_term = a21 * np.log((z1pt0 + self.CONSTS['c2']) / (median_z1pt0 + self.CONSTS['c2'])) ...
def _compute_soil_depth_term(self, C, imt, z1pt0, vs30)
Compute and return soil depth model term, that is the 9-th term in equation 1, page 74. The calculation of this term is explained in paragraph 'Soil Depth Model', page 79.
2.422168
2.413678
1.003518
vs30_1100 = np.zeros_like(sites.vs30) + 1100 vs30_star, _ = self._compute_vs30_star_factor(imt, vs30_1100) C = self.COEFFS[imt] mean = (self._compute_base_term(C, rup, dists) + self._compute_faulting_style_term(C, rup) + self._compute_hanging_wall...
def _compute_imt1100(self, imt, sites, rup, dists)
Compute and return mean imt value for rock conditions (vs30 = 1100 m/s)
3.708989
3.597712
1.03093
std_intra = self._compute_intra_event_std(C, C_PGA, pga1100, rup.mag, sites.vs30, sites.vs30measured) std_inter = self._compute_inter_event_std(C, C_PGA, pga1100, rup.mag, ...
def _get_stddevs(self, C, C_PGA, pga1100, rup, sites, stddev_types)
Return standard deviations as described in paragraph 'Equations for standard deviation', page 81.
1.659505
1.581989
1.048999
sigma_b = self._compute_sigma_b(C, mag, vs30measured) sigma_b_pga = self._compute_sigma_b(C_PGA, mag, vs30measured) delta_amp = self._compute_partial_derivative_site_amp(C, pga1100, vs30) std_intra = np.sqrt(sigma_b ** 2 + self.CONSTS['sigma_amp'] ** 2 + ...
def _compute_intra_event_std(self, C, C_PGA, pga1100, mag, vs30, vs30measured)
Compute intra event standard deviation (equation 24) as described in the errata and not in the original paper.
3.025318
2.980466
1.015049
tau_0 = self._compute_std_0(C['s3'], C['s4'], mag) tau_b_pga = self._compute_std_0(C_PGA['s3'], C_PGA['s4'], mag) delta_amp = self._compute_partial_derivative_site_amp(C, pga1100, vs30) std_inter = np.sqrt(tau_0 ** 2 + (delta_amp ** 2) * (tau_b_pga ** 2) + ...
def _compute_inter_event_std(self, C, C_PGA, pga1100, mag, vs30)
Compute inter event standard deviation, equation 25, page 82.
3.449772
3.345113
1.031287
def _compute_sigma_b(self, C, mag, vs30measured):
    """
    Equation 23, page 81: remove the site amplification variability
    ``sigma_amp`` (in quadrature) from the total sigma_0.
    """
    total_sigma = self._compute_sigma_0(C, mag, vs30measured)
    amp_sigma = self.CONSTS['sigma_amp']
    return np.sqrt(total_sigma ** 2 - amp_sigma ** 2)
3.46076
3.217681
1.075545
s1 = np.zeros_like(vs30measured, dtype=float) s2 = np.zeros_like(vs30measured, dtype=float) idx = vs30measured == 1 s1[idx] = C['s1mea'] s2[idx] = C['s2mea'] idx = vs30measured == 0 s1[idx] = C['s1est'] s2[idx] = C['s2est'] return self....
def _compute_sigma_0(self, C, mag, vs30measured)
Equation 27, page 82.
2.329428
2.283804
1.019977
if mag < 5: return c1 elif mag >= 5 and mag <= 7: return c1 + (c2 - c1) * (mag - 5) / 2 else: return c2
def _compute_std_0(self, c1, c2, mag)
Common part of equations 27 and 28, pag 82.
2.533193
2.393905
1.058185
delta_amp = np.zeros_like(vs30) vlin = C['VLIN'] c = self.CONSTS['c'] b = C['b'] n = self.CONSTS['n'] idx = vs30 < vlin delta_amp[idx] = (- b * pga1100[idx] / (pga1100[idx] + c) + b * pga1100[idx] / (pga1100[idx] + c * ...
def _compute_partial_derivative_site_amp(self, C, pga1100, vs30)
Partial derivative of site amplification term with respect to PGA on rock (equation 26), as described in the errata and not in the original paper.
3.359183
3.359813
0.999812
e2 = self._compute_e2_factor(imt, vs30) a21 = e2.copy() vs30_star, v1 = self._compute_vs30_star_factor(imt, vs30) median_z1pt0 = self._compute_median_z1pt0(vs30) numerator = ((C['a10'] + C['b'] * self.CONSTS['n']) * np.log(vs30_star / np.min([v1, 1...
def _compute_a21_factor(self, C, imt, z1pt0, vs30)
Compute and return a21 factor, equation 18, page 80.
3.299549
3.253516
1.014149
def _compute_vs30_star_factor(self, imt, vs30):
    """
    Compute and return vs30 star factor, equation 5, page 77.

    :returns: a tuple ``(vs30_star, v1)`` where ``vs30_star`` is a copy
        of ``vs30`` clamped from above at ``v1``
    """
    v1 = self._compute_v1_factor(imt)
    clamped = vs30.copy()
    # values at or above v1 are capped at v1
    too_stiff = clamped >= v1
    clamped[too_stiff] = v1
    return clamped, v1
3.179237
3.000199
1.059676
if imt.name == "SA": t = imt.period if t <= 0.50: v1 = 1500.0 elif t > 0.50 and t <= 1.0: v1 = np.exp(8.0 - 0.795 * np.log(t / 0.21)) elif t > 1.0 and t < 2.0: v1 = np.exp(6.76 - 0.297 * np.log(t)) ...
def _compute_v1_factor(self, imt)
Compute and return v1 factor, equation 6, page 77.
2.811533
2.713024
1.03631
e2 = np.zeros_like(vs30) if imt.name == "PGV": period = 1 elif imt.name == "PGA": period = 0 else: period = imt.period if period < 0.35: return e2 else: idx = vs30 <= 1000 if period >= 0.35...
def _compute_e2_factor(self, imt, vs30)
Compute and return e2 factor, equation 19, page 80.
2.133896
2.114032
1.009397
z1pt0_median = np.zeros_like(vs30) + 6.745 idx = np.where((vs30 >= 180.0) & (vs30 <= 500.0)) z1pt0_median[idx] = 6.745 - 1.35 * np.log(vs30[idx] / 180.0) idx = vs30 > 500.0 z1pt0_median[idx] = 5.394 - 4.48 * np.log(vs30[idx] / 500.0) return np.exp(z1pt0_median...
def _compute_median_z1pt0(self, vs30)
Compute and return median z1pt0 (in m), equation 17, page 79.
2.236836
2.151472
1.039677
if imt.name == 'PGV': return 0.0 period = imt.period if period < 2.0: return 0.0 else: return 0.0625 * (period - 2.0)
def _compute_a22_factor(self, imt)
Compute and return the a22 factor, equation 20, page 80.
3.627638
3.298905
1.099649
assert all(stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES for stddev_type in stddev_types) mean = np.zeros_like(sites.vs30) stddevs = [np.zeros_like(sites.vs30) for _ in stddev_types] idx_rock = sites.vs30 >= self.ROCK_VS30 idx_soil = sites...
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types)
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
1.765536
1.778346
0.992796
mean[idx] = (A1 + A2 * mag + C['C1'] + C['C2'] * (A3 - mag) ** 3 + C['C3'] * np.log(rrup[idx] + A4 * np.exp(A5 * mag)) + A6 * hypo_depth)
def _compute_mean(self, C, A1, A2, A3, A4, A5, A6, mag, hypo_depth, rrup, mean, idx)
Compute mean for subduction interface events, as explained in table 2, page 67.
3.947626
3.985352
0.990534
if mag > 8.0: mag = 8.0 for stddev in stddevs: stddev[idx] += C['C4'] + C['C5'] * mag
def _compute_std(self, C, mag, stddevs, idx)
Compute total standard deviation, as explained in table 2, page 67.
5.626661
5.187365
1.084686
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
    """
    See :meth:`superclass method
    <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
    for spec of input and result values.  On top of the parent GMPE the
    mean is shifted by ln(1.162), the firm ground adjustment.
    """
    parent_mean, parent_stddevs = super().get_mean_and_stddevs(
        sites, rup, dists, imt, stddev_types)
    # firm ground adjustment: factor of 1.162 in linear amplitude space
    adjusted_mean = parent_mean + np.log(1.162)
    return adjusted_mean, parent_stddevs
4.228964
4.907959
0.861654
''' Creates the map according to the input configuration ''' if self.config['min_lon'] >= self.config['max_lon']: raise ValueError('Upper limit of long is smaller than lower limit') if self.config['min_lon'] >= self.config['max_lon']: raise ValueError('Up...
def _build_basemap(self)
Creates the map according to the input configuration
2.094482
2.020851
1.036436
def savemap(self, filename, filetype='png', papertype="a4"):
    """
    Save the current figure to ``filename``.

    :param filename: destination path
    :param str filetype: matplotlib output format (default 'png')
    :param str papertype: paper size forwarded to matplotlib
    """
    self.fig.savefig(filename,
                     dpi=self.dpi,
                     format=filetype,
                     papertype=papertype)
2.78257
2.767502
1.005444
''' :param catalogue: Earthquake catalogue as instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue` :param dict config: Configuration parameters of the algorithm, containing the following information: 'min_lat' Minimum val...
def add_catalogue(self, catalogue, overlay=False)
:param catalogue: Earthquake catalogue as instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue` :param dict config: Configuration parameters of the algorithm, containing the following information: 'min_lat' Minimum value of latitude (in d...
2.411877
1.841227
1.309929
def _plot_area_source(self, source, border='k-', border_width=1.0):
    """
    Plot the border of an area source on the map.

    :param source: area source as instance of :class: mtkAreaSource
    :param str border: matplotlib line style of the border
    :param float border_width: line width of the border
    """
    # close the polygon by repeating the first vertex at the end
    ring_lons = np.append(source.geometry.lons, source.geometry.lons[0])
    ring_lats = np.append(source.geometry.lats, source.geometry.lats[0])
    map_x, map_y = self.m(ring_lons, ring_lats)
    self.m.plot(map_x, map_y, border, linewidth=border_width)
2.150619
2.471701
0.870097
def _plot_point_source(self, source, point_marker='ks', point_size=2.0):
    """
    Plot a point source on the map.

    :param source: point source as instance of :class: mtkPointSource
    :param str point_marker: matplotlib marker style for the point
    :param float point_size: marker size for the point
    """
    map_x, map_y = self.m(source.geometry.longitude,
                          source.geometry.latitude)
    self.m.plot(map_x, map_y, point_marker, markersize=point_size)
3.707345
4.039147
0.917853
# Get the trace trace_lons = np.array([pnt.longitude for pnt in source.fault_trace.points]) trace_lats = np.array([pnt.latitude for pnt in source.fault_trace.points]) surface_projection = _fault_polygon_from_mesh(sour...
def _plot_simple_fault(self, source, border='k-', border_width=1.0)
Plots the simple fault source as a composite of the fault trace and the surface projection of the fault. :param source: Fault source as instance of :class: mtkSimpleFaultSource :param str border: Line properties of border (see matplotlib documentation for detail) ...
3.102494
2.786546
1.113383
if not max_depth: max_depth = 70. # Get outline top_edge = np.column_stack([source.geometry.mesh.lons[0], source.geometry.mesh.lats[0]]) bottom_edge = np.column_stack([source.geometry.mesh.lons[-1][::-1], ...
def _plot_complex_fault(self, source, border='k-', border_width=1.0, min_depth=0., max_depth=None, alpha=1.0)
Plots the complex fault source as a composite of the fault trace and the surface projection of the fault. :param source: Fault source as instance of :class: mtkComplexFaultSource :param str border: Line properties of border (see matplotlib documentation for detail) ...
2.280967
2.376998
0.9596
for source in model.sources: if isinstance(source, mtkAreaSource): self._plot_area_source(source, area_border, border_width) elif isinstance(source, mtkPointSource): self._plot_point_source(source, point_marker, point_size) elif isinst...
def add_source_model( self, model, area_border='k-', border_width=1.0, point_marker='ks', point_size=2.0, overlay=False, min_depth=0., max_depth=None, alpha=1.0)
Adds a source model to the map :param model: Source model of mixed typologies as instance of :class: openquake.hmtk.sources.source_model.mtkSourceModel
2.190603
2.007307
1.091314
if not norm: norm = Normalize(vmin=np.min(data), vmax=np.max(data)) x, y, = self.m(longitude, latitude) mappable = self.m.scatter(x, y, marker=shape, s=size, c=data, ...
def add_colour_scaled_points(self, longitude, latitude, data, shape='s', alpha=1.0, size=20, norm=None, overlay=False)
Overlays a set of points on a map with a fixed size but colour scaled according to the data :param np.ndarray longitude: Longitude :param np.ndarray latitude: Latitude :param np.ndarray data: Data for plotting :param str shape: Mar...
2.34788
2.53197
0.927294
if logplot: data = np.log10(data.copy()) x, y, = self.m(longitude, latitude) self.m.scatter(x, y, marker=shape, s=(smin + data ** sscale), c=colour, alpha=alpha, ...
def add_size_scaled_points( self, longitude, latitude, data, shape='o', logplot=False, alpha=1.0, colour='b', smin=2.0, sscale=2.0, overlay=False)
Plots a set of points with size scaled according to the data :param bool logplot: Choose to scale according to the logarithm (base 10) of the data :param float smin: Minimum scale size :param float sscale: Scaling factor
3.079197
3.875603
0.794508
longitude = catalogue.data['longitude'] latitude = catalogue.data['latitude'] strike = catalogue.data['strike1'] dip = catalogue.data['dip1'] rake = catalogue.data['rake1'] if not magnitude or (magnitude < 0): magnitude = catalogue.data['magnitude'] ...
def add_focal_mechanism(self, catalogue, magnitude=None, overlay=True)
Plots the focal mechanism based on the beachball representation. The focal_mechanism flag must contain: strike, dip, rake.
2.448153
2.347771
1.042756
# Create simple magnitude scaled point basemap self.add_size_scaled_points(catalogue.data['longitude'], catalogue.data['latitude'], catalogue.data['magnitude'], shape="o", ...
def add_catalogue_cluster(self, catalogue, vcl, flagvector, cluster_id=None, overlay=True)
Creates a plot of a catalogue showing where particular clusters exist
3.028002
3.10589
0.974923
assert all(stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES for stddev_type in stddev_types) C = self.COEFFS[imt] mean = self._get_mean( C, rup.mag, rup.rake, rup.dip, dists.rrup, dists.rjb ) stddevs = self._get_stddevs(C, rup.mag,...
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types)
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
2.211419
2.242059
0.986334
f1 = self._compute_magnitude_scaling(C, mag) f2 = self._compute_distance_scaling(C, mag, rrup) f3 = self._compute_faulting_mechanism(C, rake, dip) f4 = self._compute_far_source_soil_effect(C) f5 = self._compute_hanging_wall_effect(C, rjb, rrup, dip, mag) mean = ...
def _get_mean(self, C, mag, rake, dip, rrup, rjb)
Return mean value (eq. 1, page 319).
3.366467
3.257702
1.033387
std = C['c16'] + np.zeros(num_sites) if mag < 7.4: std -= 0.07 * mag else: std -= 0.518 # only the 'total' standard deviation is supported, therefore the # std is always the same for all types stddevs = [std for _ in stddev_types] ...
def _get_stddevs(self, C, mag, stddev_types, num_sites)
Return standard deviation as defined in eq.11 page 319.
6.884762
6.540948
1.052563
g = C['c5'] + C['c6'] * 0.5 + C['c7'] * 0.5 return ( rrup ** 2 + (np.exp(C['c8'] * mag + C['c9'] * (8.5 - mag) ** 2) * g) ** 2 )
def _compute_distance_scaling(self, C, mag, rrup)
Compute distance scaling term (eq.3, page 319). The distance scaling assumes the near-source effect of local site conditions due to 50% very firm soil and soft rock and 50% firm rock.
4.695154
4.986691
0.941537
# flag for reverse faulting frv = float((dip > 45) and (22.5 <= rake <= 157.5)) # flag for thrust faulting fth = float((dip <= 45) and (22.5 <= rake <= 157.5)) return C['c10'] * frv + C['c11'] * fth
def _compute_faulting_mechanism(self, C, rake, dip)
Compute faulting mechanism term (see eq. 5, page 319). Reverse faulting is defined as occurring on steep faults (dip > 45) and rake in (22.5, 157.5). Thrust faulting is defined as occurring on shallow dipping faults (dip <=45) and rake in (22.5, 157.5)
4.55581
2.916337
1.562169
# eq. 8 (to be noticed that the USGS-NSHMP implementation defines # the hanging-wall term for all rjb distances, while in the original # manuscript, hw is computed only for rjb < 5). Again the 'firm rock' # is considered hw = np.zeros_like(rjb) if dip <= 70.: ...
def _compute_hanging_wall_effect(self, C, rjb, rrup, dip, mag)
Compute hanging-wall effect (see eq. 7, 8, 9 and 10 page 319). Considers correct version of equation 8 as given in the erratum and not in the original paper.
7.293806
6.899342
1.057174
name = method.__name__ def newmethod(self): try: val = self.__dict__[name] except KeyError: val = method(self) self.__dict__[name] = val return val newmethod.__name__ = method.__name__ newmethod.__doc__ = method.__doc__ return propert...
def cached_property(method)
:param method: a method without arguments except self :returns: a cached property
1.870785
2.10667
0.888029
def distinct(keys):
    """
    Return the distinct keys in order.
    """
    # dicts preserve insertion order, so this deduplicates in order
    return list(dict.fromkeys(keys))
2.706611
2.517574
1.075087
def ceil(a, b):
    """
    Divide a / b and return the biggest integer close to the quotient.

    :param a: a number
    :param b: a positive number
    :returns: the biggest integer close to the quotient
    """
    assert b > 0, b
    if isinstance(a, int) and isinstance(b, int):
        # exact integer arithmetic: the original float(a) / b loses
        # precision for |a| > 2**53 and could return a wrong result
        return -(-a // b)
    return int(math.ceil(a / b))
4.078199
5.860004
0.695938
if max_weight <= 0: raise ValueError('max_weight=%s' % max_weight) ws = WeightedSequence([]) prev_key = 'Unspecified' for item in items: w = weight(item) k = key(item) if w < 0: # error raise ValueError('The item %r got a negative weight %s!' % ...
def block_splitter(items, max_weight, weight=lambda item: 1, key=nokey)
:param items: an iterator over items :param max_weight: the max weight to split on :param weight: a function returning the weigth of a given item :param key: a function returning the kind of a given item Group together items of the same kind until the total weight exceeds the `max_weight` and yield...
3.302151
3.461693
0.953912
assert number > 0, number assert num_slices > 0, num_slices blocksize = int(math.ceil(number / num_slices)) slices = [] start = 0 while True: stop = min(start + blocksize, number) slices.append(slice(start, stop)) if stop == number: break start +=...
def split_in_slices(number, num_slices)
:param number: a positive number to split in slices :param num_slices: the number of slices to return (at most) :returns: a list of slices >>> split_in_slices(4, 2) [slice(0, 2, None), slice(2, 4, None)] >>> split_in_slices(5, 1) [slice(0, 5, None)] >>> split_in_slices(5, 2) [slice(0, 3...
2.087048
2.364756
0.882564
if isinstance(sequence, int): return split_in_slices(sequence, hint) elif hint in (0, 1) and key is nokey: # do not split return [sequence] elif hint in (0, 1): # split by key blocks = [] for k, group in groupby(sequence, key).items(): blocks.append(group) ...
def split_in_blocks(sequence, hint, weight=lambda item: 1, key=nokey)
Split the `sequence` in a number of WeightedSequences close to `hint`. :param sequence: a finite sequence of items :param hint: an integer suggesting the number of subsequences to generate :param weight: a function returning the weigth of a given item :param key: a function returning the key of a given...
3.406066
3.870628
0.879978
if isinstance(a, float) or isinstance(a, numpy.ndarray) and a.shape: # shortcut numpy.testing.assert_allclose(a, b, rtol, atol) return if isinstance(a, (str, bytes, int)): # another shortcut assert a == b, (a, b) return if hasattr(a, '_slots_'): # record...
def assert_close(a, b, rtol=1e-07, atol=0, context=None)
Compare for equality up to a given precision two composite objects which may contain floats. NB: if the objects are or contain generators, they are exhausted. :param a: an object :param b: another object :param rtol: relative tolerance :param atol: absolute tolerance
2.583319
2.564836
1.007206
if dir is not None: if not os.path.exists(dir): os.makedirs(dir) fh, path = tempfile.mkstemp(dir=dir, prefix=prefix, suffix=suffix) _tmp_paths.append(path) if content: fh = os.fdopen(fh, "wb") if hasattr(content, 'encode'): content = content.encode('u...
def gettemp(content=None, dir=None, prefix="tmp", suffix="tmp")
Create temporary file with the given content. Please note: the temporary file must be deleted by the caller. :param string content: the content to write to the temporary file. :param string dir: directory where the file should be created :param string prefix: file name prefix :param string suffix:...
2.008547
2.224368
0.902975
def removetmp():
    """
    Remove the temporary files created by gettemp.  Files that were
    already deleted are skipped; files that cannot be removed (e.g.
    still in use on Windows) are silently left behind.
    """
    for tmp_path in _tmp_paths:
        if not os.path.exists(tmp_path):
            continue  # already removed
        try:
            os.remove(tmp_path)
        except PermissionError:
            # best-effort cleanup: leave the file if the OS refuses
            pass
4.272044
4.698182
0.909297
# we assume that the .git folder is two levels above any package # i.e. openquake/engine/../../.git git_path = os.path.join(os.path.dirname(fname), '..', '..', '.git') # macOS complains if we try to execute git and it's not available. # Code will run, but a pop-up offering to install bloatware...
def git_suffix(fname)
:returns: `<short git hash>` if Git repository found
7.173479
7.125527
1.006729
def run_in_process(code, *args):
    """
    Run in an external process the given Python code and return the
    output as a Python object.  If there are arguments, then code is
    taken as a template and traditional %-interpolation is performed.

    :param code: string or template describing Python code
    :param args: arguments to be used for interpolation
    :returns: the evaluated standard output, or None if there was none
    """
    source = code % args if args else code
    try:
        out = subprocess.check_output([sys.executable, '-c', source])
    except subprocess.CalledProcessError as exc:
        print(exc.cmd[-1], file=sys.stderr)
        raise
    if out:
        # NOTE: eval of subprocess output; only safe for trusted code
        return eval(out, {}, {})
3.366219
3.416717
0.98522
already_imported = set(sys.modules) mod_or_pkg = importlib.import_module(module_or_package) if not hasattr(mod_or_pkg, '__path__'): # is a simple module return set(sys.modules) - already_imported # else import all modules contained in the package [pkg_path] = mod_or_pkg.__path__ n ...
def import_all(module_or_package)
If `module_or_package` is a module, just import it; if it is a package, recursively imports all the modules it contains. Returns the names of the modules that were imported as a set. The set can be empty if the modules were already in sys.modules.
3.219366
3.283817
0.980373
assert packages, 'At least one package must be specified' import_package = 'from openquake.baselib.general import import_all\n' \ 'print(import_all("%s"))' % package imported_modules = run_in_process(import_package) for mod in imported_modules: for pkg in packages: ...
def assert_independent(package, *packages)
:param package: Python name of a module/package :param packages: Python names of modules/packages Make sure the `package` does not depend from the `packages`.
4.278147
4.093308
1.045156
def search_module(module, syspath=sys.path):
    """
    Given a module name (possibly with dots) returns the corresponding
    filepath, or None, if the module cannot be found.

    :param module: (dotted) name of the Python module to look for
    :param syspath: a list of directories to search (default sys.path)

    NB: the original implementation used the ``imp`` module, which was
    deprecated since Python 3.4 and removed in Python 3.12; this version
    uses ``importlib.machinery.PathFinder`` instead, preserving the old
    behaviour (a package resolves to its directory, as imp.find_module
    did).
    """
    pkg, _, submodule = module.partition(".")
    spec = importlib.machinery.PathFinder().find_spec(pkg, syspath)
    if spec is None:  # not found
        return None
    locations = spec.submodule_search_locations
    if submodule:
        if not locations:
            # pkg is a plain module, it cannot contain submodules
            return None
        # recursive search inside the package directory
        return search_module(submodule, list(locations))
    # package -> its directory, plain module -> its file path
    return list(locations)[0] if locations else spec.origin
4.113458
4.323189
0.951487
def groupby(objects, key, reducegroup=list):
    """
    :param objects: a sequence of objects with a key value
    :param key: the key function to extract the key value
    :param reducegroup: the function to apply to each group
    :returns: a dict {key value: reducegroup(group)}
    """
    out = {}
    # itertools.groupby requires the input sorted by the same key
    for kval, group in itertools.groupby(sorted(objects, key=key), key):
        out[kval] = reducegroup(group)
    return out
3.757102
5.61622
0.668973
if isinstance(kfield, tuple): kgetter = operator.itemgetter(*kfield) else: kgetter = operator.itemgetter(kfield) if isinstance(vfield, tuple): vgetter = operator.itemgetter(*vfield) else: vgetter = operator.itemgetter(vfield) dic = groupby(records, kgetter, lambd...
def groupby2(records, kfield, vfield)
:param records: a sequence of records with positional or named fields :param kfield: the index/name/tuple specifying the field to use as a key :param vfield: the index/name/tuple specifying the field to use as a value :returns: an list of pairs of the form (key, [value, ...]). >>> groupby2(['A1', 'A2',...
2.113641
2.542819
0.831219
def get_array(array, **kw):
    """
    Extract a subarray by filtering on the given keyword arguments.

    :param array: a numpy (structured) array
    :param kw: mapping field name -> required value
    :returns: the subarray where all the given fields match
    """
    filtered = array
    for field_name, wanted in kw.items():
        filtered = filtered[filtered[field_name] == wanted]
    return filtered
3.981772
2.601238
1.530722
if array_or_none1 is None and array_or_none2 is None: return False elif array_or_none1 is None and array_or_none2 is not None: return True elif array_or_none1 is not None and array_or_none2 is None: return True if array_or_none1.shape != array_or_none2.shape: return ...
def not_equal(array_or_none1, array_or_none2)
Compare two arrays that can also be None or have diffent shapes and returns a boolean. >>> a1 = numpy.array([1]) >>> a2 = numpy.array([2]) >>> a3 = numpy.array([2, 3]) >>> not_equal(a1, a2) True >>> not_equal(a1, a3) True >>> not_equal(a1, None) True
1.38388
1.52682
0.906381
def humansize(nbytes, suffixes=('B', 'KB', 'MB', 'GB', 'TB', 'PB')):
    """
    Return file size in a human-friendly format.
    """
    if nbytes == 0:
        return '0 B'
    exponent = 0
    # divide down until the value fits under 1024 (or suffixes run out)
    while nbytes >= 1024 and exponent < len(suffixes) - 1:
        nbytes /= 1024.
        exponent += 1
    # drop trailing zeros and a dangling decimal point
    formatted = ('%.2f' % nbytes).rstrip('0').rstrip('.')
    return '%s %s' % (formatted, suffixes[exponent])
1.332913
1.283436
1.03855
def deprecated(func, msg='', *args, **kw):
    """
    A family of decorators to mark deprecated functions.

    :param msg: the message to print the first time the
                deprecated function is used.

    A DeprecationWarning is emitted only on the first call; afterwards
    the number of calls is tracked on the ``called`` attribute.
    """
    full_msg = '%s.%s has been deprecated. %s' % (
        func.__module__, func.__name__, msg)
    if not hasattr(func, 'called'):
        # warn only once, then start counting the calls
        warnings.warn(full_msg, DeprecationWarning, stacklevel=2)
        func.called = 0
    func.called += 1
    return func(*args, **kw)
2.362278
3.358503
0.703372
def random_filter(objects, reduction_factor, seed=42):
    """
    Given a list of objects, returns a sublist by extracting randomly
    some elements. The reduction factor (< 1) tells how small is the
    extracted list compared to the original list.  The extraction is
    deterministic for a fixed seed.
    """
    assert 0 < reduction_factor <= 1, reduction_factor
    rng = random.Random(seed)
    # one random draw per object keeps the selection reproducible
    return [obj for obj in objects if rng.random() <= reduction_factor]
2.758647
2.704845
1.019891
def random_histogram(counts, nbins, seed):
    """
    Distribute a total number of counts on a set of bins homogenously.

    >>> random_histogram(1, 2, 42)
    array([1, 0])
    """
    numpy.random.seed(seed)
    uniform_draws = numpy.random.random(counts)
    hist, _ = numpy.histogram(uniform_draws, nbins, (0, 1))
    return hist
2.946641
6.63715
0.443962
def get_indices(integers):
    """
    :param integers: a sequence of integers (with repetitions)
    :returns: a dict integer -> [(start, stop), ...]

    >>> get_indices([0, 0, 3, 3, 3, 2, 2, 0])
    {0: [(0, 2), (7, 8)], 3: [(2, 5)], 2: [(5, 7)]}
    """
    indices = AccumDict(accum=[])  # idx -> [(start, stop), ...]
    start = 0
    for value, run in itertools.groupby(integers):
        stop = start + len(list(run))
        indices[value].append((start, stop))
        start = stop
    return indices
5.336184
5.915383
0.902086
new_args = [] # when stdout is redirected to a file, python 2 uses ascii for the writer; # python 3 uses what is configured in the system (i.e. 'utf-8') # if sys.stdout is replaced by a StringIO instance, Python 2 does not # have an attribute 'encoding', and we assume ascii in that case str...
def safeprint(*args, **kwargs)
Convert and print characters using the proper encoding
5.199837
4.997266
1.040536
if hasattr(hostport, 'startswith'): # string representation of the hostport combination if hostport.startswith('tcp://'): hostport = hostport[6:] # strip tcp:// host, port = hostport.split(':') hostport = (host, int(port)) sock = socket.socket(socket.AF_INET, so...
def socket_ready(hostport)
:param hostport: a pair (host, port) or a string (tcp://)host:port :returns: True if the socket is ready and False otherwise
2.628654
2.606596
1.008463
prefix = len(os.path.commonprefix([os.path.dirname(f) for f in fnames])) with zipfile.ZipFile( archive, mode, zipfile.ZIP_DEFLATED, allowZip64=True) as z: for f in fnames: log('Archiving %s' % f) z.write(f, f[prefix:]) if cleanup: # remove the zipped...
def zipfiles(fnames, archive, mode='w', log=lambda msg: None, cleanup=False)
Build a zip archive from the given file names. :param fnames: list of path names :param archive: path of the archive
2.499738
3.192151
0.783089
# see https://pagure.io/python-daemon/blob/master/f/daemon/daemon.py and # https://stackoverflow.com/questions/45911705/why-use-os-setsid-in-python def fork_then_exit_parent(): pid = os.fork() if pid: # in parent os._exit(0) fork_then_exit_parent() os.setsid() f...
def detach_process()
Detach the current process from the controlling terminal by using a double fork. Can be used only on platforms with fork (no Windows).
4.043463
4.139364
0.976832
def println(msg):
    """
    Convenience function to print messages on a single line in the
    terminal: the message is written and flushed, then the cursor is
    moved back to the start so the next message overwrites this one.
    """
    sys.stdout.write(msg)
    sys.stdout.flush()
    # backspace characters move the cursor back to the line start
    sys.stdout.write('\x08' * len(msg))
    sys.stdout.flush()
2.284684
2.422348
0.943169
def debug(templ, *args):
    """
    Append a debug line to the file <tempdir>/debug.txt.

    :param templ: a %-template (or a plain message if no args)
    :param args: arguments for the template interpolation
    """
    message = templ % args if args else templ
    debug_path = os.path.join(tempfile.gettempdir(), 'debug.txt')
    with open(debug_path, 'a', encoding='utf8') as f:
        f.write(message + '\n')
2.685175
2.386333
1.12523
def warn(msg, *args):
    """
    Print a warning on stderr, prefixed by 'WARNING: '; ``args`` are
    used for traditional %-interpolation, if given.
    """
    text = msg % args if args else msg
    sys.stderr.write('WARNING: ' + text)
2.448314
2.222998
1.101357
''' Find the memory footprint of a Python object recursively, see https://code.tutsplus.com/tutorials/understand-how-much-memory-your-python-objects-use--cms-25609 :param o: the object :returns: the size in bytes ''' ids = ids or set() if id(o) in ids: return 0 nbytes = sys....
def getsizeof(o, ids=None)
Find the memory footprint of a Python object recursively, see https://code.tutsplus.com/tutorials/understand-how-much-memory-your-python-objects-use--cms-25609 :param o: the object :returns: the size in bytes
2.663966
1.72855
1.541157
def insert(self, i, item_weight):
    """
    Insert an item with the given weight at position ``i`` of the
    sequence, updating the total weight accordingly.

    :param i: insertion index
    :param item_weight: a pair (item, weight)
    """
    elem, wt = item_weight
    self._seq.insert(i, elem)
    self.weight = self.weight + wt
6.081522
5.181479
1.173704
def add(self, *keys):
    """
    Return a decorator registering a new implementation for the
    CallableDict for the given keys.
    """
    def register(func):
        # map every requested key to the same implementation
        for key in keys:
            self[key] = func
        return func
    return register
3.951791
2.839437
1.391752
def apply(self, func, *extras):
    """
    Return a new instance of the same class with ``func`` applied to
    every value:

    >> a = AccumDict({'a': 1, 'b': 2})
    >> a.apply(lambda x, y: 2 * x + y, 1)
    {'a': 3, 'b': 5}
    """
    transformed = {key: func(value, *extras)
                   for key, value in self.items()}
    return self.__class__(transformed)
4.193492
3.797237
1.104354
def new(self, array):
    """
    Convert an array of compatible length into a DictArray:

    >> d = DictArray({'PGA': [0.01, 0.02, 0.04], 'PGV': [0.1, 0.2]})
    >> d.new(numpy.arange(0, 5, 1))  # array of length 5 = 3 + 2
    <DictArray PGA: [0 1 2] PGV: [3 4]>
    """
    assert len(array) == len(self.array)
    # build a bare instance without invoking __init__, then copy over
    # the layout metadata and attach the new underlying array
    clone = object.__new__(type(self))
    clone.dt = self.dt
    clone.slicedic = self.slicedic
    clone.array = array
    return clone
5.010815
6.040902
0.829481
# extracting dictionary of coefficients specific to required # intensity measure type. C = self.COEFFS[imt] imean = self._get_mean(C, rup, dists, sites) if imt.name in "SA PGA": # Convert units to g, # but only for PGA and SA (not PGV): ...
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types)
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
4.262465
4.340137
0.982104
def _get_magnitude_scaling_term(self, C, mag):
    """
    Returns the magnitude scaling term of the GMPE described in equation 3:
    quadratic in (mag - Mh) below the hinge magnitude Mh, linear above it.
    """
    m_h = self.CONSTS["Mh"]
    dmag = mag - m_h
    if mag >= m_h:
        # above-hinge branch: linear scaling
        return C["e1"] + C["b3"] * dmag
    # below-hinge branch: quadratic scaling
    return C["e1"] + C["b1"] * dmag + C["b2"] * dmag ** 2.0
3.674091
3.46122
1.061502
def _get_distance_scaling_term(self, C, rval, mag):
    """
    Returns the distance scaling term of the GMPE described in equation 2.

    :param rval: distance value(s), scalar or numpy array
    :param mag: earthquake magnitude
    """
    mref = self.CONSTS["Mref"]
    rref = self.CONSTS["Rref"]
    # effective distance including the pseudo-depth term h
    r_adj = np.sqrt(rval ** 2.0 + C["h"] ** 2.0)
    geometric = (C["c1"] + C["c2"] * (mag - mref)) * np.log10(r_adj / rref)
    anelastic = C["c3"] * (r_adj - rref)
    return geometric - anelastic
3.704178
3.427003
1.08088
SS, NS, RS = 0.0, 0.0, 0.0 if np.abs(rup.rake) <= 30.0 or (180.0 - np.abs(rup.rake)) <= 30.0: # strike-slip SS = 1.0 elif rup.rake > 30.0 and rup.rake < 150.0: # reverse RS = 1.0 else: # normal NS = 1.0 ...
def _get_style_of_faulting_term(self, C, rup)
Returns the style-of-faulting term. Fault type (Strike-slip, Normal, Thrust/reverse) is derived from rake angle. Rakes angles within 30 of horizontal are strike-slip, angles from 30 to 150 are reverse, and angles from -30 to -150 are normal. Note that the 'Unspecified' ca...
2.579466
2.222566
1.16058
return C["gamma"] * np.log10(vs30 / self.CONSTS["Vref"])
def _get_site_amplification_term(self, C, vs30)
Returns the site amplification term for the case in which Vs30 is used directly
11.840969
13.735375
0.862078
f_s = np.zeros_like(vs30) # Site class B idx = np.logical_and(vs30 < 800.0, vs30 >= 360.0) f_s[idx] = C["eB"] # Site Class C idx = np.logical_and(vs30 < 360.0, vs30 >= 180.0) f_s[idx] = C["eC"] # Site Class D idx = vs30 < 180.0 f_s...
def _get_site_amplification_term(self, C, vs30)
Returns the site amplification given Eurocode 8 site classification
2.240484
2.095175
1.069354
def _get_mean(self, C, rup, dists, sites):
    """
    Returns the mean value of ground motion - noting that in this case
    the style-of-faulting term is neglected.
    """
    mag_term = self._get_magnitude_scaling_term(C, rup.mag)
    dist_term = self._get_distance_scaling_term(C, dists.rjb, rup.mag)
    site_term = self._get_site_amplification_term(C, sites.vs30)
    return mag_term + dist_term + site_term
2.907911
2.863605
1.015472
oq = dstore['oqparam'] L = len(oq.loss_dt().names) R = dstore['csm_info'].get_num_rlzs() serials = dstore['ruptures']['serial'] idx_by_ser = dict(zip(serials, range(len(serials)))) tbl = numpy.zeros((len(serials), L), F32) lbr = numpy.zeros((R, L), F32) # losses by rlz for rec in d...
def build_loss_tables(dstore)
Compute the total losses by rupture and losses by rlzi.
7.626108
6.376886
1.195898
L = len(riskmodel.lti) epspath = param['epspath'] for ri in riskinputs: with monitor('getting hazard'): ri.hazard_getter.init() hazard = ri.hazard_getter.get_hazard() mon = monitor('build risk curves', measuremem=False) A = len(ri.aids) R = ri.haz...
def event_based_risk(riskinputs, riskmodel, param, monitor)
:param riskinputs: :class:`openquake.risklib.riskinput.RiskInput` objects :param riskmodel: a :class:`openquake.risklib.riskinput.CompositeRiskModel` instance :param param: a dictionary of parameters :param monitor: :class:`openquake.baselib.performance.Monitor` instance ...
5.993564
5.905738
1.014871
def html(header_rows):
    """
    Convert a list of tuples describing a table into a HTML string.

    :param header_rows: a list of rows (the first row being the header)
    :returns: the rendered HTML table as a string
    """
    name = 'table%d' % next(tablecounter)
    # materialize each row as a list of strings: the original used
    # `map(str, row)`, a one-shot iterator in Python 3 that is exhausted
    # after a single pass and breaks renderers traversing rows twice
    rows = [[str(col) for col in row] for row in header_rows]
    return HtmlTable(rows, name).render()
8.0272
7.58498
1.058302
templ = ''' <div id="tabs"> <ul> %s </ul> %s </div>''' lis = [] contents = [] for i, (tag_id, status, tag_content) in enumerate( zip(tag_ids, tag_status, tag_contents), 1): mark = '.' if status == 'complete' else '!' lis.append('<li><a href="#tabs-%d">%s%s</a></li>' % (i...
def make_tabs(tag_ids, tag_status, tag_contents)
Return a HTML string containing all the tabs we want to display
2.167095
2.139637
1.012833
if isodate == 'today': isodate = date.today() else: isodate = date(*time.strptime(isodate, '%Y-%m-%d')[:3]) isodate1 = isodate + timedelta(1) # +1 day tag_ids = [] tag_status = [] tag_contents = [] # the fetcher returns an header which is stripped with [1:] jobs =...
def make_report(isodate='today')
Build a HTML report with the computations performed at the given isodate. Return the name of the report, which is saved in the current directory.
4.999781
4.85687
1.029424
E = param['E'] L = len(riskmodel.loss_types) result = dict(agg=numpy.zeros((E, L), F32), avg=[], all_losses=AccumDict(accum={})) for ri in riskinputs: for out in riskmodel.gen_outputs(ri, monitor, param['epspath']): r = out.rlzi weight = param['weig...
def scenario_risk(riskinputs, riskmodel, param, monitor)
Core function for a scenario computation. :param riskinput: a of :class:`openquake.risklib.riskinput.RiskInput` object :param riskmodel: a :class:`openquake.risklib.riskinput.CompositeRiskModel` instance :param param: dictionary of extra parameters :param monitor: :class...
6.38099
5.257714
1.213643
def workers(cmd):
    """
    start/stop/restart the workers, or return their status
    """
    # in multi-user installations only the 'openquake' account may
    # manage the workers
    privileged = getpass.getuser() == 'openquake'
    if config.dbserver.multi_user and not privileged:
        sys.exit('oq workers only works in single user mode')
    master = workerpool.WorkerMaster(config.dbserver.host, **config.zworkers)
    action = getattr(master, cmd)
    print(action())
14.841114
13.976057
1.061896
def to_evenly_discretized_mfd(self):
    """
    Returns the activity rate as an instance of the :class:
    openquake.hazardlib.mfd.evenly_discretized.EvenlyDiscretizedMFD
    """
    # the MFD expects the centre of the first bin, not its lower edge
    min_mag = self.mmin + self.bin_width / 2.
    return EvenlyDiscretizedMFD(min_mag,
                                self.bin_width,
                                self.occurrence_rate.tolist())
4.952557
4.662654
1.062175
'''Returns the default upper and lower depth values if not in dictionary :param input_dict: Dictionary corresponding to the kwargs dictionary of calling function :returns: 'upper_depth': Upper seismogenic depth (float) 'lower_depth': Lower seismogenic depth (float) ''' if (...
def _check_depth_limits(input_dict)
Returns the default upper and lower depth values if not in dictionary :param input_dict: Dictionary corresponding to the kwargs dictionary of calling function :returns: 'upper_depth': Upper seismogenic depth (float) 'lower_depth': Lower seismogenic depth (float)
2.613053
1.751053
1.492276
''' As the decimal time function requires inputs in the form of numpy arrays need to convert each value in the datetime object to a single numpy array ''' # Get decimal seconds from seconds + microseconds temp_seconds = np.float(time.second) + (np.float(time.microsecond) / 1.0E6) retur...
def _get_decimal_from_datetime(time)
As the decimal time function requires inputs in the form of numpy arrays need to convert each value in the datetime object to a single numpy array
3.654263
2.096639
1.742915
''' Method to post-process the catalogue based on the selection options :param numpy.ndarray valid_id: Boolean vector indicating whether each event is selected (True) or not (False) :returns: Catalogue of selected events as instance of op...
def select_catalogue(self, valid_id)
Method to post-process the catalogue based on the selection options :param numpy.ndarray valid_id: Boolean vector indicating whether each event is selected (True) or not (False) :returns: Catalogue of selected events as instance of openquake.hmtk.seismic...
3.816234
2.149209
1.775646
''' Select earthquakes within polygon :param polygon: Centre point as instance of nhlib.geo.polygon.Polygon class :param float distance: Buffer distance (km) (can take negative values) :returns: Instance of :class:`openquake.hmtk.seismicity....
def within_polygon(self, polygon, distance=None, **kwargs)
Select earthquakes within polygon :param polygon: Centre point as instance of nhlib.geo.polygon.Polygon class :param float distance: Buffer distance (km) (can take negative values) :returns: Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue`...
5.294781
3.124508
1.694597
''' Select earthquakes within a distance from a Point :param point: Centre point as instance of nhlib.geo.point.Point class :param float distance: Distance (km) :returns: Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue` ...
def circular_distance_from_point(self, point, distance, **kwargs)
Select earthquakes within a distance from a Point :param point: Centre point as instance of nhlib.geo.point.Point class :param float distance: Distance (km) :returns: Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue` containing ...
5.106774
2.608767
1.957544
''' Select earthquakes from within a square centered on a point :param point: Centre point as instance of nhlib.geo.point.Point class :param distance: Distance (km) :returns: Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue`...
def cartesian_square_centred_on_point(self, point, distance, **kwargs)
Select earthquakes from within a square centered on a point :param point: Centre point as instance of nhlib.geo.point.Point class :param distance: Distance (km) :returns: Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue` class c...
2.870654
2.040558
1.406799