code string | signature string | docstring string | loss_without_docstring float64 | loss_with_docstring float64 | factor float64 |
|---|---|---|---|---|---|
try:
if self._data_from_search:
agent = self._data_from_search.find(
'ul', {'class': 'links'}).text
return agent.split(':')[1].strip()
else:
return self._ad_page_content.find('a', {'id': 'smi-link-branded'}).tex... | def agent(self) | This method returns the agent name.
:return: | 5.602186 | 5.424238 | 1.032806 |
try:
if self._data_from_search:
agent = self._data_from_search.find('ul', {'class': 'links'})
links = agent.find_all('a')
return links[1]['href']
else:
return self._ad_page_content.find('a', {'id': 'smi-link-branded... | def agent_url(self) | This method returns the agent's url.
:return: | 4.507099 | 4.489753 | 1.003863 |
try:
number = self._ad_page_content.find(
'button', {'class': 'phone-number'})
return (base64.b64decode(number.attrs['data-p'])).decode('ascii')
except Exception as e:
if self._debug:
logging.error(
"Error g... | def contact_number(self) | This method returns the contact phone number.
:return: | 5.268614 | 5.183738 | 1.016374 |
try:
if self._data_from_search:
link = self._data_from_search.find('a', href=True)
return 'http://www.daft.ie' + link['href']
else:
return self._ad_page_content.find('link', {'rel': 'canonical'})['href']
except Exception as... | def daft_link(self) | This method returns the url of the listing.
:return: | 3.517113 | 3.340982 | 1.052718 |
try:
div = self._ad_page_content.find(
'div', {'class': 'description_extras'})
index = [i for i, s in enumerate(
div.contents) if 'Shortcode' in str(s)][0] + 1
return div.contents[index]['href']
except Exception as e:
... | def shortcode(self) | This method returns the shortcode url of the listing.
:return: | 4.69468 | 4.474502 | 1.049207 |
try:
div = self._ad_page_content.find(
'div', {'class': 'description_extras'})
index = [i for i, s in enumerate(
div.contents) if 'Property Views' in str(s)][0] + 1
return int(''.join(list(filter(str.isdigit, div.contents[index]))))
... | def views(self) | This method returns the "Property Views" from listing.
:return: | 4.68138 | 4.264647 | 1.097718 |
try:
if self._data_from_search:
info = self._data_from_search.find(
'ul', {"class": "info"}).text
s = info.split('|')
return s[0].strip()
else:
return self._ad_page_content.find(
... | def dwelling_type(self) | This method returns the dwelling type.
:return: | 5.216168 | 5.170949 | 1.008745 |
try:
if self._data_from_search:
info = self._data_from_search.find(
'div', {"class": "date_entered"}).text
s = info.split(':')
return s[-1].strip()
else:
div = self._ad_page_content.find(
... | def posted_since(self) | This method returns the date the listing was entered.
:return: | 4.214539 | 3.964803 | 1.062988 |
try:
if self._data_from_search:
info = self._data_from_search.find(
'ul', {"class": "info"}).text
s = info.split('|')
nb = s[1].strip()
return int(nb.split()[0])
else:
div = self.... | def bedrooms(self) | This method gets the number of bedrooms.
:return: | 4.066585 | 3.996431 | 1.017554 |
try:
infos = self._ad_page_content.find_all(
'div', {"class": "map_info_box"})
for info in infos:
if 'Distance to City Centre' in info.text:
distance_list = re.findall(
'Distance to City Centre: (.*) km'... | def city_center_distance(self) | This method gets the distance to city center, in km.
:return: | 4.133668 | 3.900542 | 1.059768 |
routes = {}
try:
big_div = self._ad_page_content.find(
'div', {"class": "half_area_box_right"})
uls = big_div.find("ul")
if uls is None:
return None
for li in uls.find_all('li'):
route_li = li.text.s... | def transport_routes(self) | This method gets a dict of routes listed in Daft.
:return: | 4.164271 | 3.969705 | 1.049013 |
try:
scripts = self._ad_page_content.find_all('script')
for script in scripts:
if 'longitude' in script.text:
find_list = re.findall(
r'"longitude":"([\-]?[0-9.]*[0-9]+)"', script.text)
if len(find_l... | def longitude(self) | This method gets a dict of routes listed in Daft.
:return: | 3.629207 | 3.578692 | 1.014116 |
try:
alt_text = self._ad_page_content.find(
'span', {'class': 'ber-hover'}
).find('img')['alt']
if ('exempt' in alt_text):
return 'exempt'
else:
alt_arr = alt_text.split()
if 'ber' in alt_ar... | def ber_code(self) | This method gets ber code listed in Daft.
:return: | 4.512335 | 4.449023 | 1.01423 |
req = Request(debug=self._debug)
ad_search_type = self.search_type
agent_id = self.agent_id
ad_id = self.id
response = req.post('https://www.daft.ie/ajax_endpoint.php?', params={
'action': 'daft_contact_advertiser',
'from': name,
'e... | def contact_advertiser(self, name, email, contact_number, message) | This method allows you to contact the advertiser of a listing.
:param name: Your name
:param email: Your email address.
:param contact_number: Your contact number.
:param message: Your message.
:return: | 2.661849 | 2.726186 | 0.9764 |
return {
'search_type': self.search_type,
'agent_id': self.agent_id,
'id': self.id,
'price': self.price,
'price_change': self.price_change,
'viewings': self.upcoming_viewings,
'facilities': self.facilities,
... | def as_dict(self) | Return a Listing object as Dictionary
:return: dict | 2.961597 | 2.913855 | 1.016384 |
"Fetch the variables and functions"
#print("Here is the config:", config)
# fetch variables from YAML file:
self._variables = config.get(YAML_SUBSET)
# add variables and functions from the module:
module_reader.load_variables(self._variables, config)
print("Var... | def on_config(self, config) | Fetch the variables and functions | 13.235082 | 9.758271 | 1.356294 |
"Provide a hook for defining functions from an external module"
# the site_navigation argument has been made optional
# (deleted in post 1.0 mkdocs, but maintained here
# for backward compatibility)
if not self.variables:
return markdown
else:
... | def on_page_markdown(self, markdown, page, config,
site_navigation=None, **kwargs) | Provide a hook for defining functions from an external module | 13.862021 | 9.558552 | 1.450222 |
def macro(v, name=''):
name = name or v.__name__
variables[name] = v
return v
# determine the package name, from the filename:
python_module = config.get('python_module') or DEFAULT_MODULE_NAME
# get the directory of the yaml file:
config_file = config['con... | def load_variables(variables, config) | Add the template functions, via the python module
located in the same directory as the Yaml config file.
The python module must contain the following hook:
declare_variables(variables, macro):
variables['a'] = 5
@macro
def bar(x):
....
@macro
def baz... | 6.099211 | 4.968483 | 1.22758 |
if version_info < (3, 0) or validate:
if validate and len(s) % 4 != 0:
raise BinAsciiError('Incorrect padding')
s = _get_bytes(s)
if altchars is not None:
altchars = _get_bytes(altchars)
assert len(altchars) == 2, repr(altchars)
if version... | def b64decode(s, altchars=None, validate=False) | Decode bytes encoded with the standard Base64 alphabet.
Argument ``s`` is a :term:`bytes-like object` or ASCII string to
decode.
Optional ``altchars`` must be a :term:`bytes-like object` or ASCII
string of length 2 which specifies the alternative alphabet used instead
of the '+' and '/' characters... | 2.877239 | 2.817064 | 1.021361 |
if altchars is not None:
altchars = _get_bytes(altchars)
assert len(altchars) == 2, repr(altchars)
if version_info < (3, 0):
if isinstance(s, text_type):
raise TypeError('a bytes-like object is required, not \''
+ type(s).__name__ + '\'')
... | def b64encode(s, altchars=None) | Encode bytes using the standard Base64 alphabet.
Argument ``s`` is a :term:`bytes-like object` to encode.
Optional ``altchars`` must be a byte string of length 2 which specifies
an alternative alphabet for the '+' and '/' characters. This allows an
application to e.g. generate url or filesystem safe ... | 3.482126 | 3.505211 | 0.993414 |
for ext in ['*.so', '*.pyd']:
for file in glob.glob('./pybase64/' + ext):
log.info("removing '%s'", file)
if self.dry_run:
continue
os.remove(file) | def run(self) | Run command. | 5.814494 | 5.799055 | 1.002662 |
try:
song_name = os.path.splitext(song_name)[0]
except IndexError:
pass
song_name = song_name.partition('ft')[0]
# Replace characters to filter with spaces
song_name = ''.join(
map(lambda c: " " if c in chars_filter else c, song_name))
# Remove crap words
son... | def improve_name(song_name) | Improves file name by removing words such as HD, Official,etc
eg : Hey Jude (Official HD) lyrics -> Hey Jude
This helps in better searching of metadata since a spotify search of
'Hey Jude (Official HD) lyrics' fetches 0 results | 3.265969 | 3.275213 | 0.997178 |
YOUTUBECLASS = 'spf-prefetch'
html = requests.get("https://www.youtube.com/results",
params={'search_query': song_input})
soup = BeautifulSoup(html.text, 'html.parser')
soup_section = soup.findAll('a', {'rel': YOUTUBECLASS})
# Use generator over list, since storage is... | def get_song_urls(song_input) | Gather all urls, titles for a search query
from youtube | 3.825119 | 3.609012 | 1.05988 |
outtmpl = song_title + '.%(ext)s'
ydl_opts = {
'format': 'bestaudio/best',
'outtmpl': outtmpl,
'postprocessors': [
{'key': 'FFmpegExtractAudio','preferredcodec': 'mp3',
'preferredquality': '192',
},
{'key': 'FFmpegMetadata'},
... | def download_song(song_url, song_title) | Download a song using youtube url and song title | 1.710229 | 1.722234 | 0.993029 |
song_name = improve_name(file_name) # Remove useless words from title
client_credentials_manager = SpotifyClientCredentials(client_id, client_secret)
spotify = spotipy.Spotify(client_credentials_manager=client_credentials_manager)
results = spotify.search(song_name, limit=1)
results = resul... | def get_metadata(file_name, client_id, client_secret) | Tries finding metadata through Spotify | 2.882999 | 2.711549 | 1.06323 |
img = requests.get(album_art, stream=True) # Gets album art from url
img = img.raw
audio = EasyMP3(file_name, ID3=ID3)
try:
audio.add_tags()
except _util.error:
pass
audio.tags.add(
APIC(
encoding=3, # UTF-8
mime='image/png',
... | def add_album_art(file_name, album_art) | Add album_art in .mp3's tags | 3.007632 | 2.824157 | 1.064966 |
tags = EasyMP3(file_name)
if title:
tags["title"] = title
if artist:
tags["artist"] = artist
if album:
tags["album"] = album
tags.save()
return file_name | def add_metadata(file_name, title, artist, album) | As the method name suggests | 2.982908 | 3.35984 | 0.887812 |
for file_path in files:
tags = EasyMP3(file_path)
tags.delete()
tags.save() | def revert_metadata(files) | Removes all tags from a mp3 file | 7.314019 | 4.289663 | 1.705033 |
qset = self.filter(user=user)
if not qset:
return None
if qset.count() > 1:
raise Exception('This app does not currently support multiple vault ids')
return qset.get() | def get_user_vault_instance_or_none(self, user) | Returns a vault_id string or None | 4.773189 | 4.470996 | 1.06759 |
assert self.is_in_vault(user)
if vault_id:
user_vault = self.get(user=user, vault_id=vault_id)
else:
user_vault = self.get(user=user) | def charge(self, user, vault_id=None) | If vault_id is not passed this will assume that there is only one instane of user and vault_id in the db. | 2.806076 | 2.619694 | 1.071146 |
try:
result = Transaction.sale(
{
'amount': amount.quantize(Decimal('.01')),
'customer_id': self.vault_id,
"options": {
"submit_for_settlement": True
}
}
... | def charge(self, amount) | Charges the users credit card, with he passed $amount, if they are in the vault. Returns the payment_log instance
or None (if charge fails etc.) | 3.478983 | 2.914896 | 1.193519 |
assert self.is_valid()
cc_details_map = { # cc details
'number': self.cleaned_data['cc_number'],
'cardholder_name': self.cleaned_data['name'],
'expiration_date': '%s/%s' %\
(self.cleaned_data['expiration_month'], self.cleaned_data[... | def save(self, prepend_vault_id='') | Adds or updates a users CC to the vault.
@prepend_vault_id: any string to prepend all vault id's with in case the same braintree account is used by
multiple projects/apps. | 4.960135 | 4.626248 | 1.072172 |
d = {}
if request.method == 'POST':
# Credit Card is being changed/updated by the user
form = UserCCDetailsForm(request.user, True, request.POST)
if form.is_valid():
response = form.save()
if response.is_success:
messages.add_message(requ... | def payments_billing(request, template='django_braintree/payments_billing.html') | Renders both the past payments that have occurred on the users credit card, but also their CC information on file
(if any) | 4.022186 | 3.900055 | 1.031315 |
path = '{0}/{1}/{2}'.format(self.collection.name, self.id, 'snooze')
data = {"duration": duration}
extra_headers = {"From": requester}
return self.pagerduty.request('POST', path, data=_json_dumper(data), extra_headers=extra_headers) | def snooze(self, requester, duration) | Snooze incident.
:param requester: The email address of the individual requesting snooze. | 4.38978 | 4.889166 | 0.897858 |
path = '{0}'.format(self.collection.name)
assignments = []
if not user_ids:
raise Error('Must pass at least one user id')
for user_id in user_ids:
ref = {
"assignee": {
"id": user_id,
"type": "user_r... | def reassign(self, user_ids, requester) | Reassign this incident to a user or list of users
:param user_ids: A non-empty list of user ids
:param requester: The email address of individual requesting reassign | 3.359226 | 3.426088 | 0.980484 |
return self.create_event(description, "resolve",
details, incident_key) | def resolve_incident(self, incident_key,
description=None, details=None) | Causes the referenced incident to enter resolved state.
Send a resolve event when the problem that caused the initial
trigger has been fixed. | 11.142553 | 13.119367 | 0.849321 |
'''Recurse through dictionary and replace any keys "self" with
"self_"'''
if type(response) is list:
for elem in response:
clean_response(elem)
elif type(response) is dict:
for key, val in response.items():
if key == 'self':
val = response.pop('sel... | def clean_response(response) | Recurse through dictionary and replace any keys "self" with
"self_" | 3.427783 | 2.137256 | 1.603824 |
if not string:
return ""
new_string = [string[0].lower()]
for char in string[1:]:
if char.isupper():
new_string.append("_")
new_string.append(char.lower())
return "".join(new_string) | def _lower(string) | Custom lower string function.
Examples:
FooBar -> foo_bar | 2.08638 | 2.154066 | 0.968578 |
if not user_ids:
raise Error('Must pass at least one user id')
self._do_action('reassign', requester_id=requester_id, assigned_to_user=','.join(user_ids)) | def reassign(self, user_ids, requester_id) | Reassign this incident to a user or list of users
:param user_ids: A non-empty list of user ids | 4.934003 | 5.579332 | 0.884336 |
return self.create_event(service_key, description, "acknowledge",
details, incident_key) | def acknowledge_incident(self, service_key, incident_key,
description=None, details=None) | Causes the referenced incident to enter the acknowledged state.
Send an acknowledge event when someone is presently working on the
incident. | 6.27924 | 8.349441 | 0.752055 |
return self.create_event(service_key, description, "trigger",
details, incident_key,
client=client, client_url=client_url, contexts=contexts) | def trigger_incident(self, service_key, description,
incident_key=None, details=None,
client=None, client_url=None, contexts=None) | Report a new or ongoing problem. When PagerDuty receives a trigger,
it will either open a new incident, or add a new log entry to an
existing incident. | 3.064194 | 4.10394 | 0.746647 |
if isinstance(dataset, numpy.ndarray) and not len(dataset.shape) == 4:
check_dataset_shape(dataset)
check_dataset_range(dataset)
else: # must be a list of arrays or a 4D NumPy array
for i, d in enumerate(dataset):
if not isinstance(d, numpy.ndarray):
rai... | def check_dataset(dataset) | Confirm shape (3 colors x rows x cols) and values [0 to 255] are OK. | 3.364534 | 3.085645 | 1.090383 |
if isinstance(dataset, numpy.ndarray):
if len(dataset.shape) == 3: # NumPy 3D
if dataset.shape[-1] == 3:
return dataset.transpose((2, 0, 1))
elif len(dataset.shape) == 4: # NumPy 4D
if dataset.shape[-1] == 3:
return dataset.transpose((0,... | def try_fix_dataset(dataset) | Transpose the image data if it's in PIL format. | 2.213011 | 2.079236 | 1.064339 |
dim, nrow, ncol = dataset.shape
uint8_dataset = dataset.astype('uint8')
if not (uint8_dataset == dataset).all():
message = (
"\nYour image was cast to a `uint8` (`<img>.astype(uint8)`), "
"but some information was lost.\nPlease check your gif and "
"convert t... | def get_image(dataset) | Convert the NumPy array to two nested lists with r,g,b tuples. | 4.482162 | 4.283247 | 1.04644 |
nbits = max(math.ceil(math.log(num_colors, 2)), 2)
return '{:03b}'.format(int(nbits - 1)) | def get_color_table_size(num_colors) | Total values in the color table is 2**(1 + int(result, base=2)).
The result is a three-bit value (represented as a string with
ones or zeros) that will become part of a packed byte encoding
various details about the color table, used in the Logical
Screen Descriptor block. | 3.889806 | 4.51002 | 0.862481 |
colors = Counter(pixel for row in image for pixel in row)
if len(colors) > 256:
msg = (
"The maximum number of distinct colors in a GIF is 256 but "
"this image has {} colors and can't be encoded properly."
)
raise RuntimeError(msg.format(len(colors)))
re... | def get_colors(image) | Return a Counter containing each color and how often it appears. | 4.909129 | 4.272678 | 1.148958 |
global_color_table = b''.join(c[0] for c in colors.most_common())
full_table_size = 2**(1+int(get_color_table_size(len(colors)), 2))
repeats = 3 * (full_table_size - len(colors))
zeros = struct.pack('<{}x'.format(repeats))
return global_color_table + zeros | def _get_global_color_table(colors) | Return a color table sorted in descending order of count. | 5.546288 | 5.265185 | 1.053389 |
lzw_code_size, coded_bits = _lzw_encode(image, colors)
coded_bytes = ''.join(
'{{:0{}b}}'.format(nbits).format(val) for val, nbits in coded_bits)
coded_bytes = '0' * ((8 - len(coded_bytes)) % 8) + coded_bytes
coded_data = list(
reversed([
int(coded_bytes[8*i:8*(i+1)], 2)... | def _get_image_data(image, colors) | Performs the LZW compression as described by Matthew Flickinger.
This isn't fast, but it works.
http://www.matthewflickinger.com/lab/whatsinagif/lzw_image_data.asp | 2.623998 | 2.575447 | 1.018851 |
try:
check_dataset(dataset)
except ValueError as e:
dataset = try_fix_dataset(dataset)
check_dataset(dataset)
delay_time = 100 // int(fps)
def encode(d):
four_d = isinstance(dataset, numpy.ndarray) and len(dataset.shape) == 4
if four_d or not isinstance(data... | def write_gif(dataset, filename, fps=10) | Write a NumPy array to GIF 89a format.
Or write a list of NumPy arrays to an animation (GIF 89a format).
- Positional arguments::
:param dataset: A NumPy arrayor list of arrays with shape
rgb x rows x cols and integer values in [0, 255].
:param filename: The output fil... | 3.852461 | 4.031632 | 0.955559 |
logging.basicConfig(level=logging.DEBUG)
# create the application and the main window
app = QtWidgets.QApplication(sys.argv)
window = QtWidgets.QMainWindow()
# setup ui
ui = example_ui.Ui_MainWindow()
ui.setupUi(window)
ui.bt_delay_popup.addActions([
ui.actionAction,
... | def main() | Application entry point | 3.729223 | 3.61806 | 1.030725 |
# Smart import of the rc file
f = QtCore.QFile(':qdarkgraystyle/style.qss')
if not f.exists():
_logger().error('Unable to load stylesheet, file not found in '
'resources')
return ''
else:
f.open(QtCore.QFile.ReadOnly | QtCore.QFile.Text)
ts =... | def load_stylesheet() | Loads the stylesheet for use in a pyqt5 application.
:return the stylesheet string | 4.442508 | 4.30756 | 1.031328 |
# If input is not flow, then create from iamge sequence
try:
assert image_sequence_or_flow.ndim == 1
flow_org = image_sequence_or_flow
except AssertionError:
flow_org = tracking.optical_flow_magnitude(image_sequence_or_flow)
# Gyro from gyro data
gyro_mag =... | def sync_camera_gyro(image_sequence_or_flow, image_timestamps, gyro_data, gyro_timestamps, levels=6, full_output=False) | Get time offset that aligns image timestamps with gyro timestamps.
Given an image sequence, and gyroscope data, with their respective timestamps,
calculate the offset that aligns the image data with the gyro data.
The timestamps must only differ by an offset, not a scale factor.
This function find... | 4.092644 | 3.953516 | 1.035191 |
flow = tracking.optical_flow_magnitude(image_sequence)
flow_timestamps = image_timestamps[:-2]
# Let user select points in both pieces of data
(frame_pair, gyro_idx) = manual_sync_pick(flow, gyro_timestamps, gyro_data)
# Normalize data
gyro_abs_max = np.max(np.abs(gyro_da... | def sync_camera_gyro_manual(image_sequence, image_timestamps, gyro_data, gyro_timestamps, full_output=False) | Get time offset that aligns image timestamps with gyro timestamps.
Given an image sequence, and gyroscope data, with their respective timestamps,
calculate the offset that aligns the image data with the gyro data.
The timestamps must only differ by an offset, not a scale factor.
This function find... | 4.257293 | 4.069656 | 1.046106 |
endpoints = []
in_low = False
for i, val in enumerate(flow):
if val < motion_threshold:
if not in_low:
endpoints.append(i)
in_low = True
else:
if in_low:
endpoints.append(i-1) # Previous was last in a low spot
... | def good_sequences_to_track(flow, motion_threshold=1.0) | Get list of good frames to do tracking in.
Looking at the optical flow, this function chooses a span of frames
that fulfill certain criteria.
These include
* not being too short or too long
* not too low or too high mean flow magnitude
* a low max value (avoids motion blur)
Curr... | 2.494828 | 2.509679 | 0.994082 |
self.params['user']['gyro_rate'] = gyro_rate
for p in ('gbias_x', 'gbias_y', 'gbias_z'):
self.params['initialized'][p] = 0.0
if slices is not None:
self.slices = slices
if self.slices is None:
self.slices = videoslice.Slice.from_stream_rand... | def initialize(self, gyro_rate, slices=None, skip_estimation=False) | Prepare calibrator for calibration
This method does three things:
1. Create slices from the video stream, if not already provided
2. Estimate time offset
3. Estimate rotation between camera and gyroscope
Parameters
------------------
gyro_rate : float
... | 4.047801 | 3.544798 | 1.141899 |
f_g = self.parameter['gyro_rate']
d_c = self.parameter['time_offset']
n = f_g * (t + d_c)
n0 = int(np.floor(n))
tau = n - n0
return n0, tau | def video_time_to_gyro_sample(self, t) | Convert video time to gyroscope sample index and interpolation factor
Parameters
-------------------
t : float
Video timestamp
Returns
--------------------
n : int
Sample index that precedes t
tau : float
Interpolation factor ... | 5.200774 | 5.176226 | 1.004742 |
D = {}
for source in PARAM_SOURCE_ORDER:
D.update(self.params[source])
return D | def parameter(self) | Return the current best value of a parameter | 9.573488 | 10.792624 | 0.88704 |
x0 = np.array([self.parameter[param] for param in PARAM_ORDER])
available_tracks = np.sum([len(s.inliers) for s in self.slices])
if available_tracks < max_tracks:
warnings.warn("Could not use the requested {} tracks, since only {} were available in the slice data.".format(ma... | def calibrate(self, max_tracks=MAX_OPTIMIZATION_TRACKS, max_eval=MAX_OPTIMIZATION_FEV, norm_c=DEFAULT_NORM_C) | Perform calibration
Parameters
----------------------
max_eval : int
Maximum number of function evaluations
Returns
---------------------
dict
Optimization result
Raises
-----------------------
CalibrationError
... | 3.935332 | 4.162555 | 0.945413 |
flow = self.video.flow
gyro_rate = self.parameter['gyro_rate']
frame_times = np.arange(len(flow)) / self.video.frame_rate
gyro_times = np.arange(self.gyro.num_samples) / gyro_rate
time_offset = timesync.sync_camera_gyro(flow, frame_times, self.gyro.data.T, gyro_times, le... | def find_initial_offset(self, pyramids=6) | Estimate time offset
This sets and returns the initial time offset estimation.
Parameters
---------------
pyramids : int
Number of pyramids to use for ZNCC calculations.
If initial estimation of time offset fails, try lowering this value.
... | 5.458827 | 6.024276 | 0.906138 |
if 'time_offset' not in self.parameter:
raise InitializationError("Can not estimate rotation without an estimate of time offset. Please estimate the offset and try again.")
dt = float(1.0 / self.parameter['gyro_rate']) # Must be python float for fastintegrate
q ... | def find_initial_rotation(self) | Estimate rotation between camera and gyroscope
This sets and returns the initial rotation estimate.
Note that the initial time offset must have been estimated before calling this function!
Returns
--------------------
(3,3) ndarray
Estimated rotation betwee... | 4.248617 | 4.198574 | 1.011919 |
print("Parameters")
print("--------------------")
for param in PARAM_ORDER:
print(' {:>11s} = {}'.format(param, self.parameter[param])) | def print_params(self) | Print the current best set of parameters | 6.482729 | 6.339329 | 1.022621 |
gf1 = cv2.getGaussianKernel(ksize, gstd1)
gf2 = cv2.getGaussianKernel(ksize, gstd2)
gf3 = cv2.getGaussianKernel(ksize, gstd3)
sqrtimg = cv2.sqrt(img)
p1 = cv2.sepFilter2D(sqrtimg, -1, gf1, gf1)
p2 = cv2.sepFilter2D(sqrtimg, -1, gf2, gf2)
maxarr = np.maximum(0, (p1 - p2) / p2)
minarr... | def remove_slp(img, gstd1=GSTD1, gstd2=GSTD2, gstd3=GSTD3, ksize=KSIZE, w=W) | Remove the SLP from kinect IR image
The input image should be a float32 numpy array, and should NOT be a square root image
Parameters
------------------
img : (M, N) float ndarray
Kinect NIR image with SLP pattern
gstd1 : float
Standard deviation of gaussian kernel 1
... | 2.952372 | 3.010046 | 0.980839 |
import h5py
with h5py.File(filename, 'r') as f:
wc = f["wc"].value
lgamma = f["lgamma"].value
K = f["K"].value
readout = f["readout"].value
image_size = f["size"].value
fps = f["fps"].value
instance = cls(image_... | def from_hdf(cls, filename) | Load camera model params from a HDF5 file
The HDF5 file should contain the following datasets:
wc : (2,) float with distortion center
lgamma : float distortion parameter
readout : float readout value
size : (2,) int image size
fps : float frame rate
... | 3.812725 | 2.035504 | 1.873111 |
X = points if not points.ndim == 1 else points.reshape((points.size, 1))
wx, wy = self.wc
# Switch to polar coordinates
rn = np.sqrt((X[0,:] - wx)**2 + (X[1,:] - wy)**2)
phi = np.arctan2(X[1,:] - wy, X[0,:]-wx)
# 'atan' method
r = np.tan(rn * self.lgamm... | def invert(self, points) | Invert the distortion
Parameters
------------------
points : ndarray
Input image points
Returns
-----------------
ndarray
Undistorted points | 4.245648 | 4.732121 | 0.897198 |
K = self.camera_matrix
XU = points
XU = XU / np.tile(XU[2], (3,1))
X = self.apply(XU)
x2d = np.dot(K, X)
return from_homogeneous(x2d) | def project(self, points) | Project 3D points to image coordinates.
This projects 3D points expressed in the camera coordinate system to image points.
Parameters
--------------------
points : (3, N) ndarray
3D points
Returns
--------------------
image_points : (2, N) ndarray
... | 5.754476 | 6.549926 | 0.878556 |
Ki = self.inv_camera_matrix
X = np.dot(Ki, to_homogeneous(image_points))
X = X / X[2]
XU = self.invert(X)
return XU | def unproject(self, image_points) | Find (up to scale) 3D coordinate of an image point
This is the inverse of the `project` function.
The resulting 3D points are only valid up to an unknown scale.
Parameters
----------------------
image_points : (2, N) ndarray
Image points
Returns
---... | 6.2766 | 7.369689 | 0.851678 |
rvec = tvec = np.zeros(3)
image_points, jac = cv2.projectPoints(points.T.reshape(-1,1,3), rvec, tvec, self.camera_matrix, self.dist_coefs)
return image_points.reshape(-1,2).T | def project(self, points) | Project 3D points to image coordinates.
This projects 3D points expressed in the camera coordinate system to image points.
Parameters
--------------------
points : (3, N) ndarray
3D points
Returns
--------------------
image_points : (2, N) ndarray
... | 2.899207 | 3.16976 | 0.914646 |
undist_image_points = cv2.undistortPoints(image_points.T.reshape(1,-1,2), self.camera_matrix, self.dist_coefs, P=self.camera_matrix)
world_points = np.dot(self.inv_camera_matrix, to_homogeneous(undist_image_points.reshape(-1,2).T))
return world_points | def unproject(self, image_points) | Find (up to scale) 3D coordinate of an image point
This is the inverse of the `project` function.
The resulting 3D points are only valid up to an unknown scale.
Parameters
----------------------
image_points : (2, N) ndarray
Image points
Returns
---... | 2.847396 | 3.274031 | 0.869691 |
"Take list of Kinect filenames (without path) and extracts timestamps while accounting for timestamp overflow (returns linear timestamps)."
timestamps = np.array([Kinect.timestamp_from_filename(fname) for fname in file_list])
# Handle overflow
diff = np.diff(timestamps)
idxs = n... | def timestamps_from_file_list(file_list) | Take list of Kinect filenames (without path) and extracts timestamps while accounting for timestamp overflow (returns linear timestamps). | 8.074514 | 3.83679 | 2.104497 |
"Given a list of image files, find bad frames, remove them and modify file_list"
MAX_INITIAL_BAD_FRAMES = 15
bad_ts = Kinect.detect_bad_timestamps(Kinect.timestamps_from_file_list(file_list))
# Trivial case
if not bad_ts:
return file_list
# No bad fr... | def purge_bad_timestamp_files(file_list) | Given a list of image files, find bad frames, remove them and modify file_list | 4.698282 | 3.904018 | 1.203448 |
(root, filename) = os.path.split(video_filename)
needle_ts = int(filename.split('-')[2].split('.')[0])
haystack_ts_list = np.array(Kinect.timestamps_from_file_list(depth_file_list))
haystack_idx = np.flatnonzero(haystack_ts_list == needle_ts)[0]
depth_filename = depth_fi... | def depth_file_for_nir_file(video_filename, depth_file_list) | Returns the corresponding depth filename given a NIR filename | 3.254307 | 3.157841 | 1.030548 |
(root, filename) = os.path.split(rgb_filename)
rgb_timestamps = np.array(Kinect.timestamps_from_file_list(rgb_file_list))
depth_timestamps = np.array(Kinect.timestamps_from_file_list(depth_file_list))
needle_ts = rgb_timestamps[rgb_file_list.index(rgb_filename)]
haystack... | def depth_file_for_rgb_file(rgb_filename, rgb_file_list, depth_file_list) | Returns the *closest* depth file from an RGB filename | 2.679957 | 2.571733 | 1.042082 |
"Remove all files without its own counterpart. Returns new lists of files"
new_video_list = []
new_depth_list = []
for fname in video_file_list:
try:
depth_file = Kinect.depth_file_for_nir_file(fname, depth_file_list)
new_video_... | def find_nir_file_with_missing_depth(video_file_list, depth_file_list) | Remove all files without its own counterpart. Returns new lists of files | 3.25508 | 2.486346 | 1.309182 |
"Convert image of Kinect disparity values to distance (linear method)"
dist_img = dval_img / 2048.0
dist_img = 1 / (self.opars[0]*dist_img + self.opars[1])
return dist_img | def disparity_image_to_distance(self, dval_img) | Convert image of Kinect disparity values to distance (linear method) | 8.453995 | 4.948099 | 1.708534 |
A = [len(s.inliers) for s in slice_list]
N_max = np.sum(A)
if N > N_max:
raise ValueError("Tried to draw {:d} samples from a pool of only {:d} items".format(N, N_max))
samples_from = np.zeros((len(A),), dtype='int') # Number of samples to draw from each group
remaining = N
whi... | def fill_sampling(slice_list, N) | Given a list of slices, draw N samples such that each slice contributes as much as possible
Parameters
--------------------------
slice_list : list of Slice
List of slices
N : int
Number of samples to draw | 3.684188 | 3.763504 | 0.978925 |
if self.axis is None:
x = self.points[:, 0, :].T
y = self.points[:, -1, :].T
inlier_ratio = 0.5
R, t, dist, idx = rotations.estimate_rotation_procrustes_ransac(x, y,
camera,
... | def estimate_rotation(self, camera, ransac_threshold=7.0) | Estimate the rotation between first and last frame
It uses RANSAC where the error metric is the reprojection error of the points
from the last frame to the first frame.
Parameters
-----------------
camera : CameraModel
Camera model
ransac_threshold : float
... | 4.225238 | 4.518031 | 0.935195 |
new_step = lambda: int(np.random.uniform(low=step_bounds[0], high=step_bounds[1]))
new_length = lambda: int(np.random.uniform(low=length_bounds[0], high=length_bounds[1]))
seq_frames = []
slices = []
seq_start_points = None
next_seq_start = new_step() if... | def from_stream_randomly(video_stream, step_bounds=(5, 15), length_bounds=(2, 15), max_start=None, min_distance=10, min_slice_points=10) | Create slices from a video stream using random sampling
Parameters
-----------------
video_stream : VideoStream
A video stream
step_bounds : tuple
Range bounds (inclusive) of possible step lengths
length_bounds : tuple
Range bounds (inclusive)... | 2.564723 | 2.656633 | 0.965403 |
assert X.shape == Y.shape
assert X.shape[0] > 1
# Minimal case, create third point using cross product
if X.shape[0] == 2:
X3 = np.cross(X[:,0], X[:,1], axis=0)
X = np.hstack((X, X3 / np.linalg.norm(X3)))
Y3 = np.cross(Y[:,0], Y[:,1], axis=0)
Y = np.hstack((Y, ... | def procrustes(X, Y, remove_mean=False) | Orthogonal procrustes problem solver
The procrustes problem finds the best rotation R, and translation t
where
X = R*Y + t
The number of points in X and Y must be at least 2.
For the minimal case of two points, a third point is temporarily created
and used for the estimation.
... | 2.611223 | 2.453054 | 1.064478 |
assert R.shape == (3,3)
assert_almost_equal(np.linalg.det(R), 1.0, err_msg="Not a rotation matrix: determinant was not 1")
S, V = np.linalg.eig(R)
k = np.argmin(np.abs(S - 1.))
s = S[k]
assert_almost_equal(s, 1.0, err_msg="Not a rotation matrix: No eigen value s=1")
v = np.real(V[:, k])... | def rotation_matrix_to_axis_angle(R) | Convert a 3D rotation matrix to a 3D axis angle representation
Parameters
---------------
R : (3,3) array
Rotation matrix
Returns
----------------
v : (3,) array
(Unit-) rotation angle
theta : float
Angle of rotations, in radians
Note
------... | 2.606009 | 2.565317 | 1.015862 |
if np.abs(theta) < np.spacing(1):
return np.eye(3)
else:
v = v.reshape(3,1)
np.testing.assert_almost_equal(np.linalg.norm(v), 1.)
vx = np.array([[0, -v[2], v[1]],
[v[2], 0, -v[0]],
[-v[1], v[0], 0]])
vvt = np.dot(v, v.T)
... | def axis_angle_to_rotation_matrix(v, theta) | Convert rotation from axis-angle to rotation matrix
Parameters
---------------
v : (3,) ndarray
Rotation axis (normalized)
theta : float
Rotation angle (radians)
Returns
----------------
R : (3,3) ndarray
Rotation matrix | 2.229653 | 2.382759 | 0.935745 |
q = q.flatten()
assert q.size == 4
assert_almost_equal(np.linalg.norm(q), 1.0, err_msg="Not a unit quaternion!")
qq = q ** 2
R = np.array([[qq[0] + qq[1] - qq[2] - qq[3], 2*q[1]*q[2] -
2*q[0]*q[3], 2*q[1]*q[3] + 2*q[0]*q[2]],
[2*q[1]*q[2] + 2*q[0]*q[3], qq[0] - qq[1] + qq[2] -
q... | def quat_to_rotation_matrix(q) | Convert unit quaternion to rotation matrix
Parameters
-------------
q : (4,) ndarray
Unit quaternion, scalar as first element
Returns
----------------
R : (3,3) ndarray
Rotation matrix | 1.545731 | 1.592733 | 0.97049 |
#NB: Quaternion q = [a, n1, n2, n3], scalar first
q_list = np.zeros((gyro_ts.shape[0], 4)) # Nx4 quaternion list
q_list[0,:] = np.array([1, 0, 0, 0]) # Initial rotation (no rotation)
# Iterate over all (except first)
for i in range(1, gyro_ts.size):
w = gyro_data[i]
dt = gy... | def integrate_gyro_quaternion(gyro_ts, gyro_data) | Integrate angular velocities to rotations
Parameters
---------------
gyro_ts : ndarray
Timestamps
gyro_data : (3, N) ndarray
Angular velocity measurements
Returns
---------------
rotations : (4, N) ndarray
Rotation sequence as unit quaternions (f... | 2.751794 | 2.726628 | 1.009229 |
q1 = q1.flatten()
q2 = q2.flatten()
assert q1.shape == q2.shape
assert q1.size == 4
costheta = np.dot(q1, q2)
if np.isclose(u, 0.):
return q1
elif np.isclose(u, 1.):
return q2
elif u > 1 or u < 0:
raise ValueError("u must be in range [0, 1]")
# Shortest... | def slerp(q1, q2, u) | SLERP: Spherical linear interpolation between two unit quaternions.
Parameters
------------
q1 : (4, ) ndarray
Unit quaternion (first element scalar)
q2 : (4, ) ndarray
Unit quaternion (first element scalar)
u : float
Interpolation factor in range [0,1] where... | 2.268945 | 2.349572 | 0.965684 |
assert x.shape == y.shape
assert x.shape[0] == 2
X = camera.unproject(x)
Y = camera.unproject(y)
data = np.vstack((X, Y, x))
assert data.shape[0] == 8
model_func = lambda data: procrustes(data[:3], data[3:6], remove_mean=do_translation)
def eval_func(model, data)... | def estimate_rotation_procrustes_ransac(x, y, camera, threshold, inlier_ratio=0.75, do_translation=False) | Calculate rotation between two sets of image coordinates using ransac.
Inlier criteria is the reprojection error of y into image 1.
Parameters
-------------------------
x : array 2xN image coordinates in image 1
y : array 2xN image coordinates in image 2
camera : Camera model
threshold... | 3.174117 | 3.225483 | 0.984075 |
M = None
max_consensus = 0
all_idx = list(range(data.shape[1]))
final_consensus = []
for k in range(num_iter):
np.random.shuffle(all_idx)
model_set = all_idx[:num_points]
x = data[:, model_set]
m = model_func(x)
model_error = eval_func(m, data)
a... | def RANSAC(model_func, eval_func, data, num_points, num_iter, threshold, recalculate=False) | Apply RANSAC.
This RANSAC implementation will choose the best model based on the number of points in the consensus set. At evaluation time the model is created using num_points points. Then it will be recalculated using the points in the consensus set.
Parameters
------------
model_func: Takes a data ... | 2.693147 | 2.973257 | 0.90579 |
params = GFTT_DEFAULTS
if gftt_params:
params.update(gftt_params)
if initial_points is None:
initial_points = cv2.goodFeaturesToTrack(img1, params['max_corners'], params['quality_level'], params['min_distance'])
[_points, status, err] = cv2.calcOpticalFlowPyrLK(img1, img2, ini... | def track_points(img1, img2, initial_points=None, gftt_params={}) | Track points between two images
Parameters
-----------------
img1 : (M, N) ndarray
First image
img2 : (M, N) ndarray
Second image
initial_points : ndarray
Initial points. If empty, initial points will be calculated from
img1 using goodFeaturesToTr... | 2.553009 | 2.980609 | 0.856539 |
flow = []
prev_img = None
for img in image_sequence:
if img.ndim == 3 and img.shape[2] == 3:
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
if prev_img is None:
prev_img = img
continue
(next_points, prev_points) = track_points(prev_img, img, gft... | def optical_flow_magnitude(image_sequence, max_diff=60, gftt_options={}) | Return optical flow magnitude for the given image sequence
The flow magnitude is the mean value of the total (sparse) optical flow
between two images.
Crude outlier detection using the max_diff parameter is used.
Parameters
----------------
image_sequence : sequence
Sequenc... | 2.576895 | 2.572788 | 1.001596 |
# Precreate track array
tracks = np.zeros((initial_points.shape[0], len(image_list), 2), dtype='float32') # NxMx2
tracks[:,0,:] = np.reshape(np.array(initial_points), [-1,2])
track_status = np.ones([np.size(initial_points,0),1]) # All initial points are OK
empty = np.array([])
window_size =... | def track(image_list, initial_points, remove_bad=True) | Track points in image list
Parameters
----------------
image_list : list
List of images to track in
initial_points : ndarray
Initial points to use (in first image in image_list)
remove_bad : bool
If True, then the resulting list of tracks will only contain su... | 3.449874 | 3.390661 | 1.017463 |
(forward_track, forward_status) = track(image_list, initial_points, remove_bad=False)
# Reverse the order
(backward_track, backward_status) = track(image_list[::-1], forward_track[:,-1,:], remove_bad=False)
# Prune bad tracks
ok_track = np.flatnonzero(forward_status * backward_status) # Only g... | def track_retrack(image_list, initial_points, max_retrack_distance=0.5, keep_bad=False) | Track-retracks points in image list
Using track-retrack can help in only getting point tracks of high quality.
The point is tracked forward, and then backwards in the image sequence.
Points that end up further than max_retrack_distance from its starting point
are marked as bad.
Pa... | 3.332981 | 3.288329 | 1.013579 |
def from_mat_file(cls, matfilename):
    """Load gyro data from a .mat file.

    The MAT file should contain the following two arrays:
    gyro : (3, N) float ndarray
        The angular velocity measurements.
    timestamps : (N, ) float ndarray
        Timestamps of the measurements.

    Parameters
    ----------
    matfilename : str
        Path of the .mat file to load.

    Returns
    -------
    Instance of ``cls`` with ``gyro_data`` and ``timestamps`` set.
    """
    contents = scipy.io.loadmat(matfilename)
    obj = cls()
    obj.gyro_data = contents['gyro']
    obj.timestamps = contents['timestamps']
    return obj
def rate(self):
    """Get the sample rate in Hz.

    Returns
    -------
    rate : float
        The sample rate, in Hz, calculated from the timestamps.
    """
    # NOTE(review): this is N / span, not (N - 1) / span; for N samples
    # spanning `span` seconds the latter is the usual estimate — kept
    # as-is to preserve the original behavior. TODO confirm intent.
    span = self.timestamps[-1] - self.timestamps[0]
    return len(self.timestamps) / float(span)
def zero_level_calibrate(self, duration, t0=0.0):
    """Perform zero-level calibration from the chosen time interval.

    The mean measurement over [t0, t0 + duration] is treated as the
    sensor bias and subtracted from every sample. This changes the
    previously loaded data in place.

    Parameters
    ----------
    duration : float
        Number of time units to use for calibration.
    t0 : float
        Starting time of the calibration window.

    Returns
    -------
    gyro_data : (3, N) ndarray
        The calibrated (bias-removed) data.
    """
    in_window = (self.timestamps >= t0) & (self.timestamps <= t0 + duration)
    selected = np.flatnonzero(in_window)
    bias = np.mean(self.gyro_data[:, selected], axis=1).reshape(3, 1)
    self.gyro_data -= bias
    return self.gyro_data
if uniform:
dt = float(self.timestamps[1]-self.timestamps[0]) # Must be python float for fastintegrate to work
return fastintegrate.integrate_gyro_quaternion_uniform(self.gyro_data_corrected, dt)
else:
N = len(self.timestamps)
... | def integrate(self, pose_correction=np.eye(3), uniform=True) | Integrate angular velocity measurements to rotations.
Parameters
-------------
pose_correction : (3,3) ndarray, optional
Rotation matrix that describes the relative pose between the IMU and something else (e.g. camera).
uniform : bool
If True (default), a... | 3.273345 | 3.134402 | 1.044328 |
def rotation_at_time(t, timestamps, rotation_sequence):
    """Get the gyro rotation at time t using SLERP.

    Parameters
    ----------
    t : float
        The query timestamp.
    timestamps : array_like float
        List of all timestamps.
    rotation_sequence : (4, N) ndarray
        Rotation sequence as unit quaternions (scalar first).

    Returns
    -------
    q : (4,) ndarray
        Interpolated unit quaternion at time t.
    """
    # First sample at or after t; the small epsilon guards against
    # floating point noise when t coincides with a sample time.
    idx = np.flatnonzero(timestamps >= (t - 0.0001))[0]
    t_prev = timestamps[idx - 1]
    t_next = timestamps[idx]
    # Fractional position of t within [t_prev, t_next]
    tau = (t - t_prev) / (t_next - t_prev)
    return rotations.slerp(rotation_sequence[:, idx - 1],
                           rotation_sequence[:, idx],
                           tau)
def from_csv(cls, filename):
    """Create gyro stream from CSV data.

    The data must be formatted with three comma-separated values per
    line: (x, y, z), the measured angular velocity (in radians) of the
    specified axis.

    Parameters
    ----------
    filename : str
        Path of the CSV file to load.

    Returns
    -------
    Instance of ``cls`` with its ``data`` attribute populated.
    """
    stream = cls()
    stream.data = np.loadtxt(filename, delimiter=',')
    return stream
def from_data(cls, data):
    """Create gyroscope stream from data array.

    Parameters
    ----------
    data : (N, 3) ndarray
        Data array of angular velocities (rad/s).

    Returns
    -------
    GyroStream
        Stream object holding the given data.

    Raises
    ------
    ValueError
        If data is not a two-dimensional array with three columns.
    """
    # Also validate ndim: a 1-D array would otherwise raise IndexError
    # at data.shape[1] instead of the intended ValueError.
    if data.ndim != 2 or data.shape[1] != 3:
        raise ValueError("Gyroscope data must have shape (N, 3)")
    instance = cls()
    instance.data = data
    return instance
def integrate(self, dt):
    """Integrate gyro measurements to orientation using a uniform sample rate.

    The result is cached: calling again with the same dt returns the
    previously computed orientation without re-integrating.

    Parameters
    ----------
    dt : float
        Sample distance in seconds.

    Returns
    -------
    orientation : (4, N) ndarray
        Gyroscope orientation in quaternion form (scalar first).
    """
    if dt != self.__last_dt:
        self.__last_q = fastintegrate.integrate_gyro_quaternion_uniform(self.data, dt)
        self.__last_dt = dt
    return self.__last_q
def gaussian_kernel(gstd):
    r"""Generate odd sized truncated Gaussian.

    The generated filter kernel has a cutoff at $3\sigma$
    and is normalized to sum to 1.

    Parameters
    ----------
    gstd : float
        Standard deviation of filter.

    Returns
    -------
    g : ndarray
        Array with kernel coefficients.
    """
    # int() cast: np.ceil returns a float, and np.linspace requires an
    # integer sample count (a float `num` raises TypeError in modern NumPy).
    Nc = int(np.ceil(gstd * 3) * 2 + 1)
    x = np.linspace(-(Nc - 1) / 2, (Nc - 1) / 2, Nc, endpoint=True)
    g = np.exp(-0.5 * (x / gstd) ** 2)
    return g / np.sum(g)
Ns = np.int(np.floor(np.size(time_series)/downsample_factor))
g = gaussian_kernel(0.5*downsample_factor)
ts_blur = np.convolve(time_series,g,'same')
ts_out = np.zeros((Ns,1), dtype='float64')
for k in range(0,Ns):
cpos = (k+.5)*downsample_factor-.5
cfrac = cpos-np.floor(cpos)
... | def subsample(time_series, downsample_factor) | Subsample with Gaussian prefilter
The prefilter will have the filter size $\sigma_g=.5*ssfactor$
Parameters
--------------
time_series : ndarray
Input signal
downsample_factor : float
Downsampling factor
Returns
--------------
ts_out : ndarray
... | 2.928866 | 2.882844 | 1.015964 |
Ns0 = np.size(time_series)
Ns = np.int(np.floor(np.size(time_series)*scaling_factor))
ts_out = np.zeros((Ns,1), dtype='float64')
for k in range(0,Ns):
cpos = int(np.min([Ns0-1,np.max([0.,(k+0.5)/scaling_factor-0.5])]))
cfrac = cpos-np.floor(cpos)
cind = int(np.floor(cpos)... | def upsample(time_series, scaling_factor) | Upsample using linear interpolation
The function uses replication of the value at edges
Parameters
--------------
time_series : ndarray
Input signal
scaling_factor : float
The factor to upsample with
Returns
--------------
ts_out : ndarray
The ... | 2.768652 | 2.706974 | 1.022785 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.