#!/usr/bin/env python
#
# This script will warm up the buffer cache with the tables required to run the input
# query. This only works on a mini-dfs cluster. This is remarkably difficult to do
# since HDFS tries to hide the details of the block locations from users.
# The only way to do this is to:
#   1. use the java APIs (deprecated, of course) to extract the block ids.
#   2. find the files with those block ids on the file system and read them.
#
# First run testdata/bin/generate-block-ids.sh. This will output the block locations
# to testdata/block-ids. This file is good as long as the mini-dfs cluster does not
# get new files. If the block-ids file is not there, this script will run
# generate-block-ids.sh.
#
# Run this script, passing it the query, and it will read every replica of every
# block of every table in the query.
import math
import os
import re
import sys
import subprocess
import tempfile
from optparse import OptionParser

# Options
parser = OptionParser()
parser.add_option("-q", "--query", dest="query", default="",
                  help="Query to run. If none specified, runs all queries.")
(options, args) = parser.parse_args()

block_ids_file = 'testdata/block-ids'
data_node_root = os.environ['MINI_DFS_BASE_DATA_DIR'] + '/dfs/data'

block_ids = {}

# Parse the block ids file to get all the block ids for all the tables.
# The format of the file is:
#
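
# The rest of the original script is truncated above. Below is a minimal sketch
# (not the original implementation) of how step 2 from the header could work:
# walk the data node directories under data_node_root and read every file whose
# name matches one of the parsed block ids, which pulls those blocks into the OS
# buffer cache. The helper name and the assumption that HDFS stores each block
# as a file named "blk_<id>" are illustrative, not taken from the original script.
def warm_blocks_sketch(root_dir, ids):
  """Read every block file under root_dir whose block id is in ids (a set of strings)."""
  for dir_path, _, file_names in os.walk(root_dir):
    for file_name in file_names:
      # Assumed naming convention: data files are "blk_<numeric id>";
      # the companion "blk_<id>_<genstamp>.meta" checksum files won't match.
      match = re.match(r'^blk_(-?\d+)$', file_name)
      if match is None or match.group(1) not in ids:
        continue
      with open(os.path.join(dir_path, file_name), 'rb') as f:
        # Reading the whole file sequentially is enough to warm the buffer cache.
        while f.read(8 * 1024 * 1024):
          pass

# Example usage (hypothetical): after parsing block_ids into a dict of
# table name -> set of block id strings, warming one table's blocks might look like
#   warm_blocks_sketch(data_node_root, block_ids['lineitem'])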