From adc02c017563f51475af552167550ceea6db31ea Mon Sep 17 00:00:00 2001
From: Nikolas
Date: Fri, 21 May 2021 09:51:53 -0400
Subject: [PATCH 01/24] added figure file

---
 figure.py | 263 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 263 insertions(+)
 create mode 100644 figure.py

diff --git a/figure.py b/figure.py
new file mode 100644
index 00000000..35da0cde
--- /dev/null
+++ b/figure.py
@@ -0,0 +1,263 @@
+import os
+import time, datetime
+import re
+import statistics
+import sys
+import argparse
+
+import numpy as np
+
+import matplotlib
+import matplotlib.pyplot as plt
+
+
+def create_ax_dumbbell(ax, data, max_stacked=50):
+    '''
+    Create a dumbbell plot of concurrent plot instances over time.
+    Parameters:
+        ax: a matplotlib axis.
+        data: numpy array with [start times, end times].
+    '''
+
+    def newline(p1, p2, color='r'):
+        l = matplotlib.lines.Line2D([p1[0],p2[0]], [p1[1],p2[1]], color=color)
+        ax.add_line(l)
+        return l
+
+    # Prevent the stack from growing too tall
+    num_rows = data.shape[0]
+    stacker = []
+    for _ in range(int(np.ceil(num_rows / float(max_stacked)))):
+        stacker.extend(list(range(max_stacked)))
+    stacker = np.array(stacker)
+    stacker = stacker[:-(max_stacked-int(num_rows % float(max_stacked)))]
+
+    for (p1, p2), i in zip(data[:,:2], stacker):
+        newline([p1, i], [p2, i])
+    ax.scatter(data[:,0], stacker, color='b')
+    ax.scatter(data[:,1], stacker, color='b')
+
+    ax.set_ylabel('Plots')
+    ax.set_xlim(np.min(data[:,0])-2, np.max(data[:,1])+2)
+
+
+def create_ax_plotrate(ax, data, end=True, window=3):
+    '''
+    Create a plot showing the rate of plotting over time. Can be computed
+    with respect to the plot start (this is the rate of plot creation) or
+    with respect to the plot end (this is the rate of plot completion).
+    Parameters:
+        ax: a matplotlib axis.
+        data: numpy array with [start times, end times].
+        end: T/F, compute plot creation or plot completion rate.
+        window: Window to compute rate over.
+    '''
+
+    def estimate_rate(data, window):
+        rate_list = []
+        window_list = []
+        # This takes care of when we don't have a full window
+        for i in range(window):
+            rate_list.append(data[i] - data[0])
+            window_list.append(i)
+        # This takes care of when we do
+        for i in range(len(data) - window):
+            rate_list.append(data[i+window] - data[i])
+            window_list.append(window)
+        rate_list, window_list = np.array(rate_list), np.array(window_list)
+        rate_list[rate_list == 0] = np.nan  # This prevents a div-by-zero error
+        return np.where(np.logical_not(np.isnan(rate_list)), (window_list-1) / rate_list, 0)
+
+    # Estimate the rate of ending or the rate of starting
+    if end:
+        rate = estimate_rate(data[:,1], window)
+        ax.plot(data[:,1], rate)
+    else:
+        rate = estimate_rate(data[:,0], window)
+        ax.plot(data[:,0], rate)
+
+    ax.set_ylabel('Avg Plot Rate (plots/hour)')
+    ax.set_xlim(np.min(data[:,0])-2, np.max(data[:,1])+2)
+
+
+def create_ax_plottime(ax, data, window=3):
+    '''
+    Create a plot showing the average time to create a single plot. This is
+    computed using a moving average. Note that the plot may not be
+    very accurate for the beginning and ending windows.
+    Parameters:
+        ax: a matplotlib axis.
+        data: numpy array with [start times, end times].
+        window: Window to compute the average over.
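
A side note on the stacker slicing in create_ax_dumbbell above: when num_rows is an exact multiple of max_stacked, the remainder is 0 and the slice becomes stacker[:-max_stacked], which appears to drop an entire band of rows. A minimal sketch of a length-based trim that avoids that corner case (build_stacker is a hypothetical helper, not part of this patch):

```python
import numpy as np

def build_stacker(num_rows, max_stacked=50):
    # Repeat 0..max_stacked-1 until every row has a vertical slot, then
    # trim by length instead of by remainder, so an exact multiple of
    # max_stacked keeps all of its entries.
    reps = int(np.ceil(num_rows / max_stacked))
    return np.tile(np.arange(max_stacked), reps)[:num_rows]

assert build_stacker(75).shape == (75,)
assert build_stacker(100).shape == (100,)  # exact multiple stays intact
```
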
+ ''' + + # Compute moving avg + kernel = np.ones(window) / window + data_tiled = np.vstack(( + np.expand_dims(data[:,1] - data[:,0], axis=1), + np.tile(data[-1,1] - data[-1,0], (window-1, 1)) + )) + rolling_avg = np.convolve(data_tiled.squeeze(), kernel, mode='valid') + + ax.plot(data[:,1], rolling_avg) + + ax.set_ylabel('Avg Plot Time (hours)') + ax.set_xlim(np.min(data[:,0])-2, np.max(data[:,1])+2) + + +def create_ax_plotcumulative(ax, data): + ''' + Create a plot showing the cumulative number of plots over time. + Parameters: + ax: a matplotlib axis. + data: numpy arrary with [start times, end times]. + ''' + cumsum = np.cumsum(range(data.shape[0])) + + ax.plot(data[:,1], cumsum) + + ax.set_ylabel('Total plots (plots)') + ax.set_xlim(np.min(data[:,0])-2, np.max(data[:,1])+2) + + +def analyze(logfilenames, bytmp, bybitfield): + data = {} + logfilenames = [os.path.join(os.path.dirname(logfilenames), l) for l in os.listdir(logfilenames) if + os.path.splitext(l)[-1] == '.log'] + + for logfilename in logfilenames: + with open(logfilename, 'r') as f: + # Record of slicing and data associated with the slice + sl = 'x' # Slice key + phase_time = {} # Map from phase index to time + n_sorts = 0 + n_uniform = 0 + is_first_last = False + + # Read the logfile, triggering various behaviors on various + # regex matches. + for line in f: + # Beginning of plot job. We may encounter this multiple + # times, if a job was run with -n > 1. Sample log line: + # 2021-04-08T13:33:43.542 chia.plotting.create_plots : INFO Starting plot 1/5 + m = re.search(r'Starting plot (\d*)/(\d*)', line) + if m: + # (re)-initialize data structures + sl = 'x' # Slice key + phase_time = {} # Map from phase index to time + n_sorts = 0 + n_uniform = 0 + + seq_num = int(m.group(1)) + seq_total = int(m.group(2)) + is_first_last = seq_num == 1 or seq_num == seq_total + + # Temp dirs. Sample log line: + # Starting plotting progress into temporary dirs: /mnt/tmp/01 and /mnt/tmp/a + m = re.search(r'^Starting plotting.*dirs: (.*) and (.*)', line) + if m: + # Record tmpdir, if slicing by it + if bytmp: + tmpdir = m.group(1) + sl += '-' + tmpdir + + # Bitfield marker. Sample log line(s): + # Starting phase 2/4: Backpropagation without bitfield into tmp files... Mon Mar 1 03:56:11 2021 + # or + # Starting phase 2/4: Backpropagation into tmp files... Fri Apr 2 03:17:32 2021 + m = re.search(r'^Starting phase 2/4: Backpropagation', line) + if bybitfield and m: + if 'without bitfield' in line: + sl += '-nobitfield' + else: + sl += '-bitfield' + + # Phase timing. Sample log line: + # Time for phase 1 = 22796.7 seconds. CPU (98%) Tue Sep 29 17:57:19 2020 + for phase in ['1', '2', '3', '4']: + m = re.search(r'^Time for phase ' + phase + ' = (\d+.\d+) seconds..*', line) + if m: + phase_time[phase] = float(m.group(1)) + + # Uniform sort. Sample log line: + # Bucket 267 uniform sort. Ram: 0.920GiB, u_sort min: 0.688GiB, qs min: 0.172GiB. + # or + # ....?.... + # or + # Bucket 511 QS. Ram: 0.920GiB, u_sort min: 0.375GiB, qs min: 0.094GiB. force_qs: 1 + m = re.search(r'Bucket \d+ ([^\.]+)\..*', line) + if m and not 'force_qs' in line: + sorter = m.group(1) + n_sorts += 1 + if sorter == 'uniform sort': + n_uniform += 1 + elif sorter == 'QS': + pass + else: + print ('Warning: unrecognized sort ' + sorter) + + # Job completion. Record total time in sliced data store. + # Sample log line: + # Total time = 49487.1 seconds. 
CPU (97.26%) Wed Sep 30 01:22:10 2020 + m = re.search(r'^Total time = (\d+.\d+) seconds.', line) + if m: + time_taken = float(m.group(1)) + data.setdefault(sl, {}).setdefault('total time', []).append(time_taken) + for phase in ['1', '2', '3', '4']: + data.setdefault(sl, {}).setdefault('phase ' + phase, []).append(phase_time[phase]) + data.setdefault(sl, {}).setdefault('%usort', []).append(100 * n_uniform // n_sorts) + + time_ended = time.mktime(datetime.datetime.strptime(line.split(')')[-1][1:-1], '%a %b %d %H:%M:%S %Y').timetuple()) + data.setdefault(sl, {}).setdefault('time ended', []).append(time_ended) + data.setdefault(sl, {}).setdefault('time started', []).append(time_ended - time_taken) + + # Prepare report + for sl in data.keys(): + + # This array will hold start and end data (in hours) + data_started_ended = np.array([[ts, te, te-ts] for + ts, te in zip(data[sl]['time started'], data[sl]['time ended']) + ]) / (60 * 60) + + # Sift the data so that it starts at zero + data_started_ended -= np.min(data_started_ended[:, 0]) + + # Sort the rows by start time + data_started_ended = data_started_ended[np.argsort(data_started_ended[:, 0])] + + # Create figure + num_plots = 4 + f, _ = plt.subplots(2,1, figsize=(8, 12)) + ax = plt.subplot(num_plots,1,1) + + create_ax_dumbbell(ax, data_started_ended) + + ax = plt.subplot(num_plots,1,2) + create_ax_plotrate(ax, data_started_ended, end=True, window=3) + + ax = plt.subplot(num_plots,1,3) + create_ax_plottime(ax, data_started_ended, window=3) + + ax = plt.subplot(num_plots,1,4) + create_ax_plotcumulative(ax, data_started_ended) + + ax.set_xlabel('Time (hours)') + f.savefig('test.png') + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='') + parser.add_argument( + 'log_dir', + help='directory containing logs to analyze.') + parser.add_argument( + '--bytmp', + action='store_true', + help='slice by tmp dirs') + parser.add_argument( + '--bybitfield', + action='store_true', + help='slice by bitfield/non-bitfield sorting') + args = parser.parse_args() + + analyze(args.log_dir, args.bytmp, args.bybitfield) \ No newline at end of file From 49f2a175f4b58b590e0c6eb84455a41135f91225 Mon Sep 17 00:00:00 2001 From: CountingShe3p Date: Fri, 21 May 2021 10:06:31 -0400 Subject: [PATCH 02/24] integrated figfile contents into analyzer.py --- figure.py | 263 ---------------------------------------- src/plotman/analyzer.py | 234 +++++++++++++++++++++++++++++------ src/plotman/plotman.py | 17 ++- 3 files changed, 209 insertions(+), 305 deletions(-) delete mode 100644 figure.py diff --git a/figure.py b/figure.py deleted file mode 100644 index 35da0cde..00000000 --- a/figure.py +++ /dev/null @@ -1,263 +0,0 @@ -import os -import time, datetime -import re -import statistics -import sys -import argparse - -import numpy as np - -import matplotlib -import matplotlib.pyplot as plt - - -def create_ax_dumbbell(ax, data, max_stacked=50): - ''' - Create a dumbbell plot of concurrent plot instances over time. - Parameters: - ax: a matplotlib axis. - data: numpy arrary with [start times, end times]. 
- ''' - - def newline(p1, p2, color='r'): - l = matplotlib.lines.Line2D([p1[0],p2[0]], [p1[1],p2[1]], color=color) - ax.add_line(l) - return l - - # Prevent the stack from growing to tall - num_rows = data.shape[0] - stacker = [] - for _ in range(int(np.ceil(num_rows / float(max_stacked)))): - stacker.extend(list(range(max_stacked))) - stacker = np.array(stacker) - stacker = stacker[:-(max_stacked-int(num_rows % float(max_stacked)))] - - for (p1, p2), i in zip(data[:,:2], stacker): - newline([p1, i], [p2, i]) - ax.scatter(data[:,0], stacker, color='b') - ax.scatter(data[:,1], stacker, color='b') - - ax.set_ylabel('Plots') - ax.set_xlim(np.min(data[:,0])-2, np.max(data[:,1])+2) - - -def create_ax_plotrate(ax, data, end=True, window=3): - ''' - Create a plot showing the rate of plotting over time. Can be computed - with respect to the plot start (this is rate of plot creation) or - with respect to the plot end (this is rate of plot completion). - Parameters: - ax: a matplotlib axis. - data: numpy arrary with [start times, end times]. - end: T/F, compute plot creation or plot completion rate. - window: Window to compute rate over. - ''' - - def estimate_rate(data, window): - rate_list = [] - window_list = [] - # This takes care of when we dont have a full window - for i in range(window): - rate_list.append(data[i] - data[0]) - window_list.append(i) - # This takes care of when we do - for i in range(len(data) - window): - rate_list.append(data[i+window] - data[i]) - window_list.append(window) - rate_list, window_list = np.array(rate_list), np.array(window_list) - rate_list[rate_list == 0] = np.nan # This prevents div by zero error - return np.where(np.logical_not(np.isnan(rate_list)), (window_list-1) / rate_list, 0) - - # Estimate the rate of ending or the rate of starting - if end: - rate = estimate_rate(data[:,1], window) - ax.plot(data[:,1], rate) - else: - rate = estimate_rate(data[:,0], window) - ax.plot(data[:,0], rate) - - ax.set_ylabel('Avg Plot Rate (plots/hour)') - ax.set_xlim(np.min(data[:,0])-2, np.max(data[:,1])+2) - - -def create_ax_plottime(ax, data, window=3): - ''' - Create a plot showing the average time to create a single plot. This is - computed using a moving average. Note that the plot may not be - very accurate for the beginning and ending windows. - Parameters: - ax: a matplotlib axis. - data: numpy arrary with [start times, end times]. - window: Window to compute rate over. - ''' - - # Compute moving avg - kernel = np.ones(window) / window - data_tiled = np.vstack(( - np.expand_dims(data[:,1] - data[:,0], axis=1), - np.tile(data[-1,1] - data[-1,0], (window-1, 1)) - )) - rolling_avg = np.convolve(data_tiled.squeeze(), kernel, mode='valid') - - ax.plot(data[:,1], rolling_avg) - - ax.set_ylabel('Avg Plot Time (hours)') - ax.set_xlim(np.min(data[:,0])-2, np.max(data[:,1])+2) - - -def create_ax_plotcumulative(ax, data): - ''' - Create a plot showing the cumulative number of plots over time. - Parameters: - ax: a matplotlib axis. - data: numpy arrary with [start times, end times]. 
- ''' - cumsum = np.cumsum(range(data.shape[0])) - - ax.plot(data[:,1], cumsum) - - ax.set_ylabel('Total plots (plots)') - ax.set_xlim(np.min(data[:,0])-2, np.max(data[:,1])+2) - - -def analyze(logfilenames, bytmp, bybitfield): - data = {} - logfilenames = [os.path.join(os.path.dirname(logfilenames), l) for l in os.listdir(logfilenames) if - os.path.splitext(l)[-1] == '.log'] - - for logfilename in logfilenames: - with open(logfilename, 'r') as f: - # Record of slicing and data associated with the slice - sl = 'x' # Slice key - phase_time = {} # Map from phase index to time - n_sorts = 0 - n_uniform = 0 - is_first_last = False - - # Read the logfile, triggering various behaviors on various - # regex matches. - for line in f: - # Beginning of plot job. We may encounter this multiple - # times, if a job was run with -n > 1. Sample log line: - # 2021-04-08T13:33:43.542 chia.plotting.create_plots : INFO Starting plot 1/5 - m = re.search(r'Starting plot (\d*)/(\d*)', line) - if m: - # (re)-initialize data structures - sl = 'x' # Slice key - phase_time = {} # Map from phase index to time - n_sorts = 0 - n_uniform = 0 - - seq_num = int(m.group(1)) - seq_total = int(m.group(2)) - is_first_last = seq_num == 1 or seq_num == seq_total - - # Temp dirs. Sample log line: - # Starting plotting progress into temporary dirs: /mnt/tmp/01 and /mnt/tmp/a - m = re.search(r'^Starting plotting.*dirs: (.*) and (.*)', line) - if m: - # Record tmpdir, if slicing by it - if bytmp: - tmpdir = m.group(1) - sl += '-' + tmpdir - - # Bitfield marker. Sample log line(s): - # Starting phase 2/4: Backpropagation without bitfield into tmp files... Mon Mar 1 03:56:11 2021 - # or - # Starting phase 2/4: Backpropagation into tmp files... Fri Apr 2 03:17:32 2021 - m = re.search(r'^Starting phase 2/4: Backpropagation', line) - if bybitfield and m: - if 'without bitfield' in line: - sl += '-nobitfield' - else: - sl += '-bitfield' - - # Phase timing. Sample log line: - # Time for phase 1 = 22796.7 seconds. CPU (98%) Tue Sep 29 17:57:19 2020 - for phase in ['1', '2', '3', '4']: - m = re.search(r'^Time for phase ' + phase + ' = (\d+.\d+) seconds..*', line) - if m: - phase_time[phase] = float(m.group(1)) - - # Uniform sort. Sample log line: - # Bucket 267 uniform sort. Ram: 0.920GiB, u_sort min: 0.688GiB, qs min: 0.172GiB. - # or - # ....?.... - # or - # Bucket 511 QS. Ram: 0.920GiB, u_sort min: 0.375GiB, qs min: 0.094GiB. force_qs: 1 - m = re.search(r'Bucket \d+ ([^\.]+)\..*', line) - if m and not 'force_qs' in line: - sorter = m.group(1) - n_sorts += 1 - if sorter == 'uniform sort': - n_uniform += 1 - elif sorter == 'QS': - pass - else: - print ('Warning: unrecognized sort ' + sorter) - - # Job completion. Record total time in sliced data store. - # Sample log line: - # Total time = 49487.1 seconds. 
CPU (97.26%) Wed Sep 30 01:22:10 2020 - m = re.search(r'^Total time = (\d+.\d+) seconds.', line) - if m: - time_taken = float(m.group(1)) - data.setdefault(sl, {}).setdefault('total time', []).append(time_taken) - for phase in ['1', '2', '3', '4']: - data.setdefault(sl, {}).setdefault('phase ' + phase, []).append(phase_time[phase]) - data.setdefault(sl, {}).setdefault('%usort', []).append(100 * n_uniform // n_sorts) - - time_ended = time.mktime(datetime.datetime.strptime(line.split(')')[-1][1:-1], '%a %b %d %H:%M:%S %Y').timetuple()) - data.setdefault(sl, {}).setdefault('time ended', []).append(time_ended) - data.setdefault(sl, {}).setdefault('time started', []).append(time_ended - time_taken) - - # Prepare report - for sl in data.keys(): - - # This array will hold start and end data (in hours) - data_started_ended = np.array([[ts, te, te-ts] for - ts, te in zip(data[sl]['time started'], data[sl]['time ended']) - ]) / (60 * 60) - - # Sift the data so that it starts at zero - data_started_ended -= np.min(data_started_ended[:, 0]) - - # Sort the rows by start time - data_started_ended = data_started_ended[np.argsort(data_started_ended[:, 0])] - - # Create figure - num_plots = 4 - f, _ = plt.subplots(2,1, figsize=(8, 12)) - ax = plt.subplot(num_plots,1,1) - - create_ax_dumbbell(ax, data_started_ended) - - ax = plt.subplot(num_plots,1,2) - create_ax_plotrate(ax, data_started_ended, end=True, window=3) - - ax = plt.subplot(num_plots,1,3) - create_ax_plottime(ax, data_started_ended, window=3) - - ax = plt.subplot(num_plots,1,4) - create_ax_plotcumulative(ax, data_started_ended) - - ax.set_xlabel('Time (hours)') - f.savefig('test.png') - - -if __name__ == '__main__': - parser = argparse.ArgumentParser(description='') - parser.add_argument( - 'log_dir', - help='directory containing logs to analyze.') - parser.add_argument( - '--bytmp', - action='store_true', - help='slice by tmp dirs') - parser.add_argument( - '--bybitfield', - action='store_true', - help='slice by bitfield/non-bitfield sorting') - args = parser.parse_args() - - analyze(args.log_dir, args.bytmp, args.bybitfield) \ No newline at end of file diff --git a/src/plotman/analyzer.py b/src/plotman/analyzer.py index 67073805..301d8064 100644 --- a/src/plotman/analyzer.py +++ b/src/plotman/analyzer.py @@ -2,15 +2,134 @@ import re import statistics import sys +import time, datetime import texttable as tt +import numpy as np + +import matplotlib +import matplotlib as plt from plotman import plot_util -def analyze(logfilenames, clipterminals, bytmp, bybitfield): +def create_ax_dumbbell(ax, data, max_stacked=50): + ''' + Create a dumbbell plot of concurrent plot instances over time. + Parameters: + ax: a matplotlib axis. + data: numpy arrary with [start times, end times]. 
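
For orientation, a usage sketch for the axis helpers being moved into plotman.analyzer in this patch; the data values are hypothetical, and the import assumes plotman is installed with these changes applied:

```python
import numpy as np
import matplotlib.pyplot as plt

from plotman.analyzer import create_ax_dumbbell  # assumed import path

# Hypothetical job data: one row per plot, [start, end] in hours.
data = np.array([
    [0.0, 6.2],
    [1.5, 7.9],
    [3.0, 9.4],
])

fig, ax = plt.subplots(figsize=(8, 3))
create_ax_dumbbell(ax, data)
ax.set_xlabel('Time (hours)')
fig.savefig('dumbbell.png')
```
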
+    '''
+
+    def newline(p1, p2, color='r'):
+        l = matplotlib.lines.Line2D([p1[0],p2[0]], [p1[1],p2[1]], color=color)
+        ax.add_line(l)
+        return l
+
+    # Prevent the stack from growing too tall
+    num_rows = data.shape[0]
+    stacker = []
+    for _ in range(int(np.ceil(num_rows / float(max_stacked)))):
+        stacker.extend(list(range(max_stacked)))
+    stacker = np.array(stacker)
+    stacker = stacker[:-(max_stacked-int(num_rows % float(max_stacked)))]
+
+    for (p1, p2), i in zip(data[:,:2], stacker):
+        newline([p1, i], [p2, i])
+    ax.scatter(data[:,0], stacker, color='b')
+    ax.scatter(data[:,1], stacker, color='b')
+
+    ax.set_ylabel('Plots')
+    ax.set_xlim(np.min(data[:,0])-2, np.max(data[:,1])+2)
+
+
+def create_ax_plotrate(ax, data, end=True, window=3):
+    '''
+    Create a plot showing the rate of plotting over time. Can be computed
+    with respect to the plot start (this is the rate of plot creation) or
+    with respect to the plot end (this is the rate of plot completion).
+    Parameters:
+        ax: a matplotlib axis.
+        data: numpy array with [start times, end times].
+        end: T/F, compute plot creation or plot completion rate.
+        window: Window to compute rate over.
+    '''
+
+    def estimate_rate(data, window):
+        rate_list = []
+        window_list = []
+        # This takes care of when we don't have a full window
+        for i in range(window):
+            rate_list.append(data[i] - data[0])
+            window_list.append(i)
+        # This takes care of when we do
+        for i in range(len(data) - window):
+            rate_list.append(data[i+window] - data[i])
+            window_list.append(window)
+        rate_list, window_list = np.array(rate_list), np.array(window_list)
+        rate_list[rate_list == 0] = np.nan  # This prevents a div-by-zero error
+        return np.where(np.logical_not(np.isnan(rate_list)), (window_list-1) / rate_list, 0)
+
+    # Estimate the rate of ending or the rate of starting
+    if end:
+        rate = estimate_rate(data[:,1], window)
+        ax.plot(data[:,1], rate)
+    else:
+        rate = estimate_rate(data[:,0], window)
+        ax.plot(data[:,0], rate)
+
+    ax.set_ylabel('Avg Plot Rate (plots/hour)')
+    ax.set_xlim(np.min(data[:,0])-2, np.max(data[:,1])+2)
+
+
+def create_ax_plottime(ax, data, window=3):
+    '''
+    Create a plot showing the average time to create a single plot. This is
+    computed using a moving average. Note that the plot may not be
+    very accurate for the beginning and ending windows.
+    Parameters:
+        ax: a matplotlib axis.
+        data: numpy array with [start times, end times].
+        window: Window to compute the average over.
+    '''
+
+    # Compute moving avg
+    kernel = np.ones(window) / window
+    data_tiled = np.vstack((
+        np.expand_dims(data[:,1] - data[:,0], axis=1),
+        np.tile(data[-1,1] - data[-1,0], (window-1, 1))
+    ))
+    rolling_avg = np.convolve(data_tiled.squeeze(), kernel, mode='valid')
+
+    ax.plot(data[:,1], rolling_avg)
+
+    ax.set_ylabel('Avg Plot Time (hours)')
+    ax.set_xlim(np.min(data[:,0])-2, np.max(data[:,1])+2)
+
+
+def create_ax_plotcumulative(ax, data):
+    '''
+    Create a plot showing the cumulative number of plots over time.
+    Parameters:
+        ax: a matplotlib axis.
+        data: numpy array with [start times, end times].
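
The padding trick used in create_ax_plottime above can be checked in isolation; a small sketch of the same idea with made-up durations:

```python
import numpy as np

durations = np.array([6.0, 6.5, 7.0, 6.8, 7.2])  # per-plot times in hours
window = 3

# Pad with the final duration so 'valid' convolution yields exactly one
# smoothed value per plot, mirroring the np.tile padding above.
padded = np.concatenate([durations, np.repeat(durations[-1], window - 1)])
kernel = np.ones(window) / window
rolling = np.convolve(padded, kernel, mode='valid')

assert rolling.shape == durations.shape
```
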
+ ''' + cumsum = np.cumsum(range(data.shape[0])) + + ax.plot(data[:,1], cumsum) + + ax.set_ylabel('Total plots (plots)') + ax.set_xlim(np.min(data[:,0])-2, np.max(data[:,1])+2) + + +def analyze(logfilenames, clipterminals, bytmp, bybitfield, figfile): data = {} for logfilename in logfilenames: + + # Make sure this is a valid logfile + if (os.path.splitext(logfilename)[-1] != '.log'): + continue + with open(logfilename, 'r') as f: # Record of slicing and data associated with the slice sl = 'x' # Slice key @@ -94,45 +213,84 @@ def analyze(logfilenames, clipterminals, bytmp, bybitfield): data.setdefault(sl, {}).setdefault('phase ' + phase, []).append(phase_time[phase]) data.setdefault(sl, {}).setdefault('%usort', []).append(100 * n_uniform // n_sorts) - # Prepare report - tab = tt.Texttable() - all_measures = ['%usort', 'phase 1', 'phase 2', 'phase 3', 'phase 4', 'total time'] - headings = ['Slice', 'n'] + all_measures - tab.header(headings) - - for sl in data.keys(): - row = [sl] - - # Sample size - sample_sizes = [] - for measure in all_measures: - values = data.get(sl, {}).get(measure, []) - sample_sizes.append(len(values)) - sample_size_lower_bound = min(sample_sizes) - sample_size_upper_bound = max(sample_sizes) - if sample_size_lower_bound == sample_size_upper_bound: - row.append('%d' % sample_size_lower_bound) - else: - row.append('%d-%d' % (sample_size_lower_bound, sample_size_upper_bound)) - - # Phase timings - for measure in all_measures: - values = data.get(sl, {}).get(measure, []) - if(len(values) > 1): - row.append('μ=%s σ=%s' % ( - plot_util.human_format(statistics.mean(values), 1), - plot_util.human_format(statistics.stdev(values), 0) - )) - elif(len(values) == 1): - row.append(plot_util.human_format(values[0], 1)) + # Grab the time ended, compute the time started + time_ended = time.mktime(datetime.datetime.strptime(line.split(')')[-1][1:-1], '%a %b %d %H:%M:%S %Y').timetuple()) + data.setdefault(sl, {}).setdefault('time ended', []).append(time_ended) + data.setdefault(sl, {}).setdefault('time started', []).append(time_ended - float(m.group(1))) + + if figfile is None: + # Prepare report + for sl in data.keys(): + + # This array will hold start and end data (in hours) + data_started_ended = np.array([[ts, te, te-ts] for + ts, te in zip(data[sl]['time started'], data[sl]['time ended']) + ]) / (60 * 60) + + # Sift the data so that it starts at zero + data_started_ended -= np.min(data_started_ended[:, 0]) + + # Sort the rows by start time + data_started_ended = data_started_ended[np.argsort(data_started_ended[:, 0])] + + # Create figure + num_plots = 4 + f, _ = plt.subplots(2,1, figsize=(8, 12)) + ax = plt.subplot(num_plots,1,1) + + create_ax_dumbbell(ax, data_started_ended) + + ax = plt.subplot(num_plots,1,2) + create_ax_plotrate(ax, data_started_ended, end=True, window=3) + + ax = plt.subplot(num_plots,1,3) + create_ax_plottime(ax, data_started_ended, window=3) + + ax = plt.subplot(num_plots,1,4) + create_ax_plotcumulative(ax, data_started_ended) + + ax.set_xlabel('Time (hours)') + f.savefig(figfile) + else: + # Prepare report + tab = tt.Texttable() + all_measures = ['%usort', 'phase 1', 'phase 2', 'phase 3', 'phase 4', 'total time'] + headings = ['Slice', 'n'] + all_measures + tab.header(headings) + + for sl in data.keys(): + row = [sl] + + # Sample size + sample_sizes = [] + for measure in all_measures: + values = data.get(sl, {}).get(measure, []) + sample_sizes.append(len(values)) + sample_size_lower_bound = min(sample_sizes) + sample_size_upper_bound = max(sample_sizes) + 
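
For reference, the "time ended" handling added in this hunk can be exercised against the sample log line on its own; a standalone sketch (it uses .strip() rather than the [1:-1] slice, which assumes a trailing newline is present):

```python
import datetime
import time

line = 'Total time = 49487.1 seconds. CPU (97.26%) Wed Sep 30 01:22:10 2020'

# Everything after the last ')' is the ctime-style completion stamp.
stamp = line.split(')')[-1].strip()
time_ended = time.mktime(
    datetime.datetime.strptime(stamp, '%a %b %d %H:%M:%S %Y').timetuple())
time_started = time_ended - 49487.1  # total time, taken from the same line
```
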
if sample_size_lower_bound == sample_size_upper_bound: + row.append('%d' % sample_size_lower_bound) else: - row.append('N/A') + row.append('%d-%d' % (sample_size_lower_bound, sample_size_upper_bound)) + + # Phase timings + for measure in all_measures: + values = data.get(sl, {}).get(measure, []) + if(len(values) > 1): + row.append('μ=%s σ=%s' % ( + plot_util.human_format(statistics.mean(values), 1), + plot_util.human_format(statistics.stdev(values), 0) + )) + elif(len(values) == 1): + row.append(plot_util.human_format(values[0], 1)) + else: + row.append('N/A') - tab.add_row(row) + tab.add_row(row) - (rows, columns) = os.popen('stty size', 'r').read().split() - tab.set_max_width(int(columns)) - s = tab.draw() - print(s) + (rows, columns) = os.popen('stty size', 'r').read().split() + tab.set_max_width(int(columns)) + s = tab.draw() + print(s) diff --git a/src/plotman/plotman.py b/src/plotman/plotman.py index 35e1616f..ee2f79ca 100755 --- a/src/plotman/plotman.py +++ b/src/plotman/plotman.py @@ -72,8 +72,12 @@ def parse_args(self): p_analyze.add_argument('--bybitfield', action='store_true', help='slice by bitfield/non-bitfield sorting') - p_analyze.add_argument('logfile', type=str, nargs='+', + p_analyze.add_argument('--logfile', type=str, nargs='+', help='logfile(s) to analyze') + p_analyze.add_argument('--logdir', type=str, + help='directory containing multiple logfiles to analyze') + p_analyze.add_argument('--figfile', type=str, default='analysis.png', + help='figure to be created if logdir is passed.') args = parser.parse_args() return args @@ -155,9 +159,14 @@ def main(): # Analysis of completed jobs # elif args.cmd == 'analyze': - - analyzer.analyze(args.logfile, args.clipterminals, - args.bytmp, args.bybitfield) + if args.logfile is not None: + analyzer.analyze(args.logfile, args.clipterminals, + args.bytmp, args.bybitfield, figfile=None) + elif args.logdir is not None: + analyzer.analyze(args.logfile, args.clipterminals, + args.bytmp, args.bybitfield, figfile=args.figfile) + else: + raise RuntimeError('Must pass a log file (--logfile) or a directory containing multiple log files (--logdir).') else: jobs = Job.get_running_jobs(cfg.directories.log) From 4914ee22deec01f8277a06c3a203ceb6beb8309f Mon Sep 17 00:00:00 2001 From: CountingShe3p Date: Fri, 21 May 2021 10:09:03 -0400 Subject: [PATCH 03/24] passed logfile where I should have passed logdir --- src/plotman/plotman.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/plotman/plotman.py b/src/plotman/plotman.py index ee2f79ca..1348cdc7 100755 --- a/src/plotman/plotman.py +++ b/src/plotman/plotman.py @@ -77,7 +77,7 @@ def parse_args(self): p_analyze.add_argument('--logdir', type=str, help='directory containing multiple logfiles to analyze') p_analyze.add_argument('--figfile', type=str, default='analysis.png', - help='figure to be created if logdir is passed.') + help='figure to be created if logdir is passed') args = parser.parse_args() return args @@ -163,7 +163,7 @@ def main(): analyzer.analyze(args.logfile, args.clipterminals, args.bytmp, args.bybitfield, figfile=None) elif args.logdir is not None: - analyzer.analyze(args.logfile, args.clipterminals, + analyzer.analyze(args.logdir, args.clipterminals, args.bytmp, args.bybitfield, figfile=args.figfile) else: raise RuntimeError('Must pass a log file (--logfile) or a directory containing multiple log files (--logdir).') From c0a75b569cb0abdbef529aabad03412cb13e21b8 Mon Sep 17 00:00:00 2001 From: CountingShe3p Date: Fri, 21 May 2021 10:10:54 -0400 Subject: 
[PATCH 04/24] default logdir and logfile should now be none --- src/plotman/plotman.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/plotman/plotman.py b/src/plotman/plotman.py index 1348cdc7..bfd7a984 100755 --- a/src/plotman/plotman.py +++ b/src/plotman/plotman.py @@ -72,9 +72,9 @@ def parse_args(self): p_analyze.add_argument('--bybitfield', action='store_true', help='slice by bitfield/non-bitfield sorting') - p_analyze.add_argument('--logfile', type=str, nargs='+', + p_analyze.add_argument('--logfile', type=str, nargs='+', default=None, help='logfile(s) to analyze') - p_analyze.add_argument('--logdir', type=str, + p_analyze.add_argument('--logdir', type=str, default=None, help='directory containing multiple logfiles to analyze') p_analyze.add_argument('--figfile', type=str, default='analysis.png', help='figure to be created if logdir is passed') From 3eda2a1991f1836cd6de254402c217d826a3ecbb Mon Sep 17 00:00:00 2001 From: CountingShe3p Date: Fri, 21 May 2021 10:15:14 -0400 Subject: [PATCH 05/24] the figfile condition to run new code was flipped --- src/plotman/analyzer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/plotman/analyzer.py b/src/plotman/analyzer.py index 301d8064..942c4e4b 100644 --- a/src/plotman/analyzer.py +++ b/src/plotman/analyzer.py @@ -218,7 +218,7 @@ def analyze(logfilenames, clipterminals, bytmp, bybitfield, figfile): data.setdefault(sl, {}).setdefault('time ended', []).append(time_ended) data.setdefault(sl, {}).setdefault('time started', []).append(time_ended - float(m.group(1))) - if figfile is None: + if figfile is not None: # Prepare report for sl in data.keys(): From ba577f8e1f3724bc2032d287d3da104f66f72645 Mon Sep 17 00:00:00 2001 From: CountingShe3p Date: Fri, 21 May 2021 10:15:57 -0400 Subject: [PATCH 06/24] removed period for consistency --- src/plotman/plotman.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/plotman/plotman.py b/src/plotman/plotman.py index bfd7a984..76628aa3 100755 --- a/src/plotman/plotman.py +++ b/src/plotman/plotman.py @@ -166,7 +166,7 @@ def main(): analyzer.analyze(args.logdir, args.clipterminals, args.bytmp, args.bybitfield, figfile=args.figfile) else: - raise RuntimeError('Must pass a log file (--logfile) or a directory containing multiple log files (--logdir).') + raise RuntimeError('Must pass a log file (--logfile) or a directory containing multiple log files (--logdir)') else: jobs = Job.get_running_jobs(cfg.directories.log) From 56d53635f30f7dc85e2c6ab3d5eed08e4f0b46be Mon Sep 17 00:00:00 2001 From: CountingShe3p Date: Fri, 21 May 2021 10:22:25 -0400 Subject: [PATCH 07/24] forgot to add code that converts directory to list of files --- src/plotman/analyzer.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/src/plotman/analyzer.py b/src/plotman/analyzer.py index 942c4e4b..f49818d6 100644 --- a/src/plotman/analyzer.py +++ b/src/plotman/analyzer.py @@ -124,12 +124,13 @@ def create_ax_plotcumulative(ax, data): def analyze(logfilenames, clipterminals, bytmp, bybitfield, figfile): data = {} - for logfilename in logfilenames: - - # Make sure this is a valid logfile - if (os.path.splitext(logfilename)[-1] != '.log'): - continue + + # Figfile now also acts like a switch between passing a directory or a single log file + if figfile is not None: + logfilenames = [os.path.join(os.path.dirname(logfilenames), l) for l in os.listdir(logfilenames) if + os.path.splitext(l)[-1] == '.log'] + for logfilename in logfilenames: with 
open(logfilename, 'r') as f: # Record of slicing and data associated with the slice sl = 'x' # Slice key @@ -249,6 +250,7 @@ def analyze(logfilenames, clipterminals, bytmp, bybitfield, figfile): ax = plt.subplot(num_plots,1,4) create_ax_plotcumulative(ax, data_started_ended) + print('Saving analysis figure to {}'.format(figfile)) ax.set_xlabel('Time (hours)') f.savefig(figfile) else: From bf7f02c7a2e96fed41115c27195457c31847a815 Mon Sep 17 00:00:00 2001 From: CountingShe3p Date: Fri, 21 May 2021 10:23:58 -0400 Subject: [PATCH 08/24] pyplot imported incorrectly --- src/plotman/analyzer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/plotman/analyzer.py b/src/plotman/analyzer.py index f49818d6..0797a4f4 100644 --- a/src/plotman/analyzer.py +++ b/src/plotman/analyzer.py @@ -8,7 +8,7 @@ import numpy as np import matplotlib -import matplotlib as plt +import matplotlib.pyplot as plt from plotman import plot_util From 8d063933431c2c380effa9fc01d039e117422dc1 Mon Sep 17 00:00:00 2001 From: CountingShe3p Date: Fri, 21 May 2021 10:41:34 -0400 Subject: [PATCH 09/24] fig file now passable for either log file or log dir --- setup.cfg | 1 + src/plotman/analyzer.py | 82 ++++++++++++++++++++--------------------- src/plotman/plotman.py | 6 +-- 3 files changed, 45 insertions(+), 44 deletions(-) diff --git a/setup.cfg b/setup.cfg index a85356b9..08bfc1ed 100644 --- a/setup.cfg +++ b/setup.cfg @@ -45,6 +45,7 @@ install_requires = psutil ~= 5.8 pyyaml ~= 5.4 texttable ~= 1.6 + matplotlib ~= 3.4.2 [options.packages.find] where=src diff --git a/src/plotman/analyzer.py b/src/plotman/analyzer.py index 0797a4f4..4bc274c1 100644 --- a/src/plotman/analyzer.py +++ b/src/plotman/analyzer.py @@ -125,8 +125,8 @@ def create_ax_plotcumulative(ax, data): def analyze(logfilenames, clipterminals, bytmp, bybitfield, figfile): data = {} - # Figfile now also acts like a switch between passing a directory or a single log file - if figfile is not None: + # Get valid logfiles if we were passed a directory + if not isinstance(logfilenames, list) and os.path.isdir(figfile): logfilenames = [os.path.join(os.path.dirname(logfilenames), l) for l in os.listdir(logfilenames) if os.path.splitext(l)[-1] == '.log'] @@ -253,46 +253,46 @@ def analyze(logfilenames, clipterminals, bytmp, bybitfield, figfile): print('Saving analysis figure to {}'.format(figfile)) ax.set_xlabel('Time (hours)') f.savefig(figfile) - else: - # Prepare report - tab = tt.Texttable() - all_measures = ['%usort', 'phase 1', 'phase 2', 'phase 3', 'phase 4', 'total time'] - headings = ['Slice', 'n'] + all_measures - tab.header(headings) - for sl in data.keys(): - row = [sl] - - # Sample size - sample_sizes = [] - for measure in all_measures: - values = data.get(sl, {}).get(measure, []) - sample_sizes.append(len(values)) - sample_size_lower_bound = min(sample_sizes) - sample_size_upper_bound = max(sample_sizes) - if sample_size_lower_bound == sample_size_upper_bound: - row.append('%d' % sample_size_lower_bound) + # Prepare report + tab = tt.Texttable() + all_measures = ['%usort', 'phase 1', 'phase 2', 'phase 3', 'phase 4', 'total time'] + headings = ['Slice', 'n'] + all_measures + tab.header(headings) + + for sl in data.keys(): + row = [sl] + + # Sample size + sample_sizes = [] + for measure in all_measures: + values = data.get(sl, {}).get(measure, []) + sample_sizes.append(len(values)) + sample_size_lower_bound = min(sample_sizes) + sample_size_upper_bound = max(sample_sizes) + if sample_size_lower_bound == sample_size_upper_bound: + row.append('%d' 
% sample_size_lower_bound)
+        else:
+            row.append('%d-%d' % (sample_size_lower_bound, sample_size_upper_bound))
+
+        # Phase timings
+        for measure in all_measures:
+            values = data.get(sl, {}).get(measure, [])
+            if(len(values) > 1):
+                row.append('μ=%s σ=%s' % (
+                    plot_util.human_format(statistics.mean(values), 1),
+                    plot_util.human_format(statistics.stdev(values), 0)
+                ))
+            elif(len(values) == 1):
+                row.append(plot_util.human_format(values[0], 1))
+            else:
+                row.append('N/A')
+
+        tab.add_row(row)
+
+    (rows, columns) = os.popen('stty size', 'r').read().split()
+    tab.set_max_width(int(columns))
+    s = tab.draw()
+    print(s)

diff --git a/src/plotman/plotman.py b/src/plotman/plotman.py
index 76628aa3..7338b7fa 100755
--- a/src/plotman/plotman.py
+++ b/src/plotman/plotman.py
@@ -76,7 +76,7 @@ def parse_args(self):
         help='logfile(s) to analyze')
     p_analyze.add_argument('--logdir', type=str, default=None,
         help='directory containing multiple logfiles to analyze')
-    p_analyze.add_argument('--figfile', type=str, default='analysis.png',
+    p_analyze.add_argument('--figfile', type=str, default=None,
         help='figure to be created if logdir is passed')

     args = parser.parse_args()
@@ -161,10 +161,10 @@ def main():
     elif args.cmd == 'analyze':
         if args.logfile is not None:
             analyzer.analyze(args.logfile, args.clipterminals,
-                args.bytmp, args.bybitfield, figfile=None)
+                args.bytmp, args.bybitfield, args.figfile)
         elif args.logdir is not None:
             analyzer.analyze(args.logdir, args.clipterminals,
-                args.bytmp, args.bybitfield, figfile=args.figfile)
+                args.bytmp, args.bybitfield, args.figfile)

From 84580131e753c90ee466600ccf0d491c8f378090 Mon Sep 17 00:00:00 2001
From: CountingShe3p
Date: Fri, 21 May 2021 10:45:01 -0400
Subject: [PATCH 10/24] assertion prevents generating figure with too few
 datapoints

---
 src/plotman/analyzer.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/plotman/analyzer.py b/src/plotman/analyzer.py
index 4bc274c1..15e5a462 100644
--- a/src/plotman/analyzer.py
+++ b/src/plotman/analyzer.py
@@ -227,6 +227,7 @@ def analyze(logfilenames, clipterminals, bytmp, bybitfield, figfile):
             data_started_ended = np.array([[ts, te, te-ts] for
                 ts, te in zip(data[sl]['time started'], data[sl]['time ended'])
                 ]) / (60 * 60)
+            assert data_started_ended.shape[0] >= 3, 'Cannot generate figure with less than 3 datapoints ({} datapoints passed)'.format(data_started_ended.shape[0])

             # Sift the data so that it starts at zero
             data_started_ended -= np.min(data_started_ended[:, 0])

From 876724f7a3277424e4ab237a4a7c4a7a643c9564 Mon Sep 17 00:00:00 2001
From: CountingShe3p
Date: Fri, 21 May 2021 10:47:52 -0400
Subject: [PATCH 11/24] directory handling was passed figfile instead of
 logfilenames

---
 src/plotman/analyzer.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/plotman/analyzer.py b/src/plotman/analyzer.py
index
15e5a462..ef65ed72 100644 --- a/src/plotman/analyzer.py +++ b/src/plotman/analyzer.py @@ -126,7 +126,7 @@ def analyze(logfilenames, clipterminals, bytmp, bybitfield, figfile): data = {} # Get valid logfiles if we were passed a directory - if not isinstance(logfilenames, list) and os.path.isdir(figfile): + if not isinstance(logfilenames, list) and os.path.isdir(logfilenames): logfilenames = [os.path.join(os.path.dirname(logfilenames), l) for l in os.listdir(logfilenames) if os.path.splitext(l)[-1] == '.log'] From 866dbd3e7de85d19349faa2c9e27ab51532b3e99 Mon Sep 17 00:00:00 2001 From: CountingShe3p Date: Fri, 21 May 2021 11:11:35 -0400 Subject: [PATCH 12/24] fixed bug with cumulative plot --- src/plotman/analyzer.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/plotman/analyzer.py b/src/plotman/analyzer.py index ef65ed72..617b1312 100644 --- a/src/plotman/analyzer.py +++ b/src/plotman/analyzer.py @@ -114,9 +114,7 @@ def create_ax_plotcumulative(ax, data): ax: a matplotlib axis. data: numpy arrary with [start times, end times]. ''' - cumsum = np.cumsum(range(data.shape[0])) - - ax.plot(data[:,1], cumsum) + ax.plot(data[:,1], range(data.shape[0])) ax.set_ylabel('Total plots (plots)') ax.set_xlim(np.min(data[:,0])-2, np.max(data[:,1])+2) @@ -239,6 +237,7 @@ def analyze(logfilenames, clipterminals, bytmp, bybitfield, figfile): num_plots = 4 f, _ = plt.subplots(2,1, figsize=(8, 12)) ax = plt.subplot(num_plots,1,1) + ax.set_title('Plot performance summary') create_ax_dumbbell(ax, data_started_ended) From f6c958f9acf99db9366d6ee2eeb892cafc3cc263 Mon Sep 17 00:00:00 2001 From: CountingShe3p Date: Mon, 21 Jun 2021 21:01:00 -0400 Subject: [PATCH 13/24] Revert "fix: avoid more missing process errors" This reverts commit 170c4d96ca6fa7a067b95fd0821e143b3eff968b. --- src/plotman/job.py | 42 +++++++++++++++++++----------------------- 1 file changed, 19 insertions(+), 23 deletions(-) diff --git a/src/plotman/job.py b/src/plotman/job.py index f818fccc..1c225ab9 100644 --- a/src/plotman/job.py +++ b/src/plotman/job.py @@ -140,17 +140,12 @@ def get_running_jobs(logroot, cached_jobs=()): with contextlib.ExitStack() as exit_stack: processes = [] - pids = set() - ppids = set() - for process in psutil.process_iter(): # Ignore processes which most likely have terminated between the time of # iteration and data access. with contextlib.suppress(psutil.NoSuchProcess, psutil.AccessDenied): exit_stack.enter_context(process.oneshot()) if is_plotting_cmdline(process.cmdline()): - ppids.add(process.ppid()) - pids.add(process.pid) processes.append(process) # https://github.com/ericaltendorf/plotman/pull/418 @@ -160,6 +155,8 @@ def get_running_jobs(logroot, cached_jobs=()): # both identified as plot processes. Only the child is # really plotting. Filter out the parent. 
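
The parent/child disambiguation described in the comment above boils down to set arithmetic on PIDs; a condensed sketch, where is_plot_job is a stand-in for the real is_plotting_cmdline(process.cmdline()) check:

```python
import contextlib
import psutil

def is_plot_job(process):
    # Stand-in predicate; the real check inspects the full command line.
    with contextlib.suppress(psutil.NoSuchProcess, psutil.AccessDenied):
        return 'chia' in ' '.join(process.cmdline())
    return False

candidates = [p for p in psutil.process_iter() if is_plot_job(p)]

# A wrapper that merely spawned the real plotter is itself the parent of
# another candidate, so keep only PIDs that are not also parent PIDs.
pids = {p.pid for p in candidates}
ppids = {p.ppid() for p in candidates}
plotters = [p for p in candidates if p.pid in pids - ppids]
```
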
+ pids = {process.pid for process in processes} + ppids = {process.ppid() for process in processes} wanted_pids = pids - ppids wanted_processes = [ @@ -169,24 +166,23 @@ def get_running_jobs(logroot, cached_jobs=()): ] for proc in wanted_processes: - with contextlib.suppress(psutil.NoSuchProcess, psutil.AccessDenied): - if proc.pid in cached_jobs_by_pid.keys(): - jobs.append(cached_jobs_by_pid[proc.pid]) # Copy from cache - else: - with proc.oneshot(): - parsed_command = parse_chia_plots_create_command_line( - command_line=proc.cmdline(), - ) - if parsed_command.error is not None: - continue - job = Job( - proc=proc, - parsed_command=parsed_command, - logroot=logroot, - ) - if job.help: - continue - jobs.append(job) + if proc.pid in cached_jobs_by_pid.keys(): + jobs.append(cached_jobs_by_pid[proc.pid]) # Copy from cache + else: + with proc.oneshot(): + parsed_command = parse_chia_plots_create_command_line( + command_line=proc.cmdline(), + ) + if parsed_command.error is not None: + continue + job = Job( + proc=proc, + parsed_command=parsed_command, + logroot=logroot, + ) + if job.help: + continue + jobs.append(job) return jobs From 375a90e75352167a9f3e7d1c4c1a4b5b942b3b32 Mon Sep 17 00:00:00 2001 From: CountingShe3p Date: Mon, 21 Jun 2021 21:14:53 -0400 Subject: [PATCH 14/24] updating fork --- .coveragerc | 19 + .github/ISSUE_TEMPLATE/bug_report.md | 43 + .github/ISSUE_TEMPLATE/config.yml | 8 + .github/ISSUE_TEMPLATE/request.md | 17 + .github/pull_request_template.md | 1 + .github/workflows/ci.yml | 286 +++ .gitignore | 5 + CHANGELOG.md | 89 + LICENSE | 201 ++ LICENSE-chia-blockchain | 201 ++ MAINTENANCE.md | 19 + MANIFEST.in | 11 + README.md | 243 ++ VERSION | 1 + mypy.ini | 26 + pyproject.toml | 3 + setup.cfg | 83 + setup.py | 4 + src/plotman/__init__.py | 0 src/plotman/__main__.py | 10 + src/plotman/_tests/__init__.py | 0 src/plotman/_tests/archive_test.py | 6 + src/plotman/_tests/configuration_test.py | 104 + src/plotman/_tests/job_test.py | 147 ++ src/plotman/_tests/log_parser_test.py | 59 + src/plotman/_tests/manager_test.py | 100 + src/plotman/_tests/plot_util_test.py | 66 + src/plotman/_tests/reporting_test.py | 81 + .../2021-04-04T19_00_47.681088-0400.log | 2089 +++++++++++++++++ .../2021-04-04T19_00_47.681088-0400.notes | 8 + src/plotman/_tests/resources/__init__.py | 0 src/plotman/analyzer.py | 185 ++ src/plotman/archive.py | 258 ++ src/plotman/chia.py | 371 +++ src/plotman/chiapos.py | 87 + src/plotman/configuration.py | 322 +++ src/plotman/csv_exporter.py | 134 ++ src/plotman/interactive.py | 361 +++ src/plotman/job.py | 517 ++++ src/plotman/log_parser.py | 145 ++ src/plotman/manager.py | 204 ++ src/plotman/plot_util.py | 142 ++ src/plotman/plotinfo.py | 112 + src/plotman/plotman.py | 333 +++ src/plotman/reporting.py | 275 +++ src/plotman/resources/__init__.py | 0 src/plotman/resources/plotman.yaml | 150 ++ src/plotman/resources/target_definitions.yaml | 64 + tox.ini | 36 + util/listlogs | 79 + 50 files changed, 7705 insertions(+) create mode 100644 .coveragerc create mode 100644 .github/ISSUE_TEMPLATE/bug_report.md create mode 100644 .github/ISSUE_TEMPLATE/config.yml create mode 100644 .github/ISSUE_TEMPLATE/request.md create mode 100644 .github/pull_request_template.md create mode 100644 .github/workflows/ci.yml create mode 100644 .gitignore create mode 100644 CHANGELOG.md create mode 100644 LICENSE create mode 100644 LICENSE-chia-blockchain create mode 100644 MAINTENANCE.md create mode 100644 MANIFEST.in create mode 100644 README.md create mode 100644 VERSION create mode 
100644 mypy.ini create mode 100644 pyproject.toml create mode 100644 setup.cfg create mode 100644 setup.py create mode 100644 src/plotman/__init__.py create mode 100644 src/plotman/__main__.py create mode 100644 src/plotman/_tests/__init__.py create mode 100644 src/plotman/_tests/archive_test.py create mode 100644 src/plotman/_tests/configuration_test.py create mode 100644 src/plotman/_tests/job_test.py create mode 100644 src/plotman/_tests/log_parser_test.py create mode 100644 src/plotman/_tests/manager_test.py create mode 100644 src/plotman/_tests/plot_util_test.py create mode 100644 src/plotman/_tests/reporting_test.py create mode 100644 src/plotman/_tests/resources/2021-04-04T19_00_47.681088-0400.log create mode 100644 src/plotman/_tests/resources/2021-04-04T19_00_47.681088-0400.notes create mode 100644 src/plotman/_tests/resources/__init__.py create mode 100644 src/plotman/analyzer.py create mode 100644 src/plotman/archive.py create mode 100644 src/plotman/chia.py create mode 100644 src/plotman/chiapos.py create mode 100644 src/plotman/configuration.py create mode 100644 src/plotman/csv_exporter.py create mode 100644 src/plotman/interactive.py create mode 100644 src/plotman/job.py create mode 100644 src/plotman/log_parser.py create mode 100644 src/plotman/manager.py create mode 100644 src/plotman/plot_util.py create mode 100644 src/plotman/plotinfo.py create mode 100644 src/plotman/plotman.py create mode 100644 src/plotman/reporting.py create mode 100644 src/plotman/resources/__init__.py create mode 100644 src/plotman/resources/plotman.yaml create mode 100644 src/plotman/resources/target_definitions.yaml create mode 100644 tox.ini create mode 100644 util/listlogs diff --git a/.coveragerc b/.coveragerc new file mode 100644 index 00000000..e6661885 --- /dev/null +++ b/.coveragerc @@ -0,0 +1,19 @@ +[paths] +source = + src + */site-packages + +[report] +precision = 1 +exclude_lines = + pragma: no cover + abc\.abstractmethod + typing\.overload + if typing.TYPE_CHECKING: + ^\s*pass\s*$ + ^\s*...\s*$ + +[run] +branch = True +source = + plotman diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 00000000..4eb8c50d --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,43 @@ +--- +name: Bug report +about: Create a report to help us improve +title: '' +labels: 'bug' +assignees: '' + +--- + + + +**Describe the bug** + + +**To Reproduce** + +Steps to reproduce the behavior, e.g.: +1. Set up config with '...' +2. Run other programs '....' +3. Run Plotman with '....' +4. See error + +**Expected behavior** + + +**System setup:** + - OS: [e.g. Ubuntu, iOS, ...] + - Method of archiving (e.g., none, rsyncd, rsync, ...) + +**Config** +
full configuration + +```yaml +# paste your complete configuration file contents here. +``` + +
+ +**Additional context & screenshots** + diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 00000000..2d9d569b --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,8 @@ +contact_links: + - about: Ask a general question or request support in Discussions + name: Ask for Support + url: >- + https://github.com/ericaltendorf/plotman/discussions/new + - about: Get support on the Chia Keybase's dedicated plotman channel. + name: Join the Keybase.io plotman channel + url: 'https://keybase.io/team/chia_network.public' diff --git a/.github/ISSUE_TEMPLATE/request.md b/.github/ISSUE_TEMPLATE/request.md new file mode 100644 index 00000000..8d08b47c --- /dev/null +++ b/.github/ISSUE_TEMPLATE/request.md @@ -0,0 +1,17 @@ +--- +name: Feature request +about: Log a request for a new feature or enhancement +title: '' +labels: 'enhancement' +assignees: '' + +--- + +**Describe the request** + + +**Images** + + +**Additional comments** + diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 00000000..9cd4ac83 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1 @@ + diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 00000000..bd920689 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,286 @@ +name: CI + +on: + push: + branches: + - main + - development + tags: [ "**" ] + pull_request: + branches: [ "**" ] + +defaults: + run: + shell: bash + +jobs: + build: + # Should match JOB_NAME below + name: ${{ matrix.task.name }} - ${{ matrix.os.name }} ${{ matrix.python.name }} + runs-on: ${{ matrix.os.runs-on }} + strategy: + fail-fast: false + matrix: + os: + - name: Linux + runs-on: ubuntu-latest + matrix: linux + python: + - name: CPython 3.8 + tox: py38 + action: 3.8 + task: + - name: Build + tox: build + + env: + # Should match name above + JOB_NAME: ${{ matrix.task.name }} - ${{ matrix.os.name }} ${{ matrix.python.name }} + + steps: + - uses: actions/checkout@v2 + with: + ref: ${{ github.event.pull_request.head.sha }} + - name: Set up ${{ matrix.python.name }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python.action }} + + - name: Install dependencies + run: | + python -m pip install --upgrade pip setuptools wheel + python -m pip install build check-manifest twine + + - uses: twisted/python-info-action@v1 + + - name: Build + run: | + check-manifest --verbose . + + python -m build --sdist --outdir dist/ . + + mkdir empty/ + cd empty + + tar -xvf ../dist/* + cd * + + # build the wheel from the sdist + python -m build --wheel --outdir ../../dist/ . 
+ cd ../../ + + twine check dist/* + + - name: Publish + uses: actions/upload-artifact@v2 + with: + name: dist + path: dist/ + + test: + # Should match JOB_NAME below + name: ${{ matrix.task.name }} - ${{ matrix.os.name }} ${{ matrix.python.name }} + runs-on: ${{ matrix.os.runs-on }} + needs: + - build + strategy: + fail-fast: false + matrix: + os: + - name: Linux + runs-on: ubuntu-latest + matrix: linux + - name: macOS + runs-on: macos-latest + matrix: macos + python: + - name: CPython 3.7 + tox: py37 + action: 3.7 + - name: CPython 3.8 + tox: py38 + action: 3.8 + - name: CPython 3.9 + tox: py39 + action: 3.9 + task: + - name: Test + tox: test + coverage: true + - name: Check hints + tox: check-hints + include: + - task: + name: Check manifest + tox: check-manifest + os: + name: Linux + runs-on: ubuntu-latest + python: + name: CPython 3.8 + action: 3.8 + + + env: + # Should match name above + JOB_NAME: ${{ matrix.task.name }} - ${{ matrix.os.name }} ${{ matrix.python.name }} + TOXENV: ${{ matrix.task.tox }}${{ fromJSON('["", "-"]')[matrix.python.tox != null] }}${{ matrix.python.tox }} + + steps: + - uses: actions/checkout@v2 + with: + ref: ${{ github.event.pull_request.head.sha }} + + - name: Download package files + uses: actions/download-artifact@v2 + with: + name: dist + path: dist/ + + - name: Set up ${{ matrix.python.name }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python.action }} + + - name: Generate extra locales (Linux) + if: ${{ matrix.os.matrix == 'linux' }} + run: | + sudo apt-get update + sudo apt-get install --yes tzdata locales + sudo locale-gen en_US.UTF-8 de_DE.UTF-8 + + - name: Install dependencies + run: | + python -m pip install --upgrade pip setuptools wheel + pip install tox + + - name: Prepare tox environment + run: | + tox --notest --installpkg dist/*.whl + + - name: Runner info + uses: twisted/python-info-action@v1 + + - name: Tox info + uses: twisted/python-info-action@v1 + with: + python-path: .tox/${{ env.TOXENV }}/*/python + + - name: Test + run: | + tox --skip-pkg-install + + - name: Coverage Processing + if: matrix.task.coverage + run: | + mkdir coverage_reports + cp .coverage "coverage_reports/.coverage.${{ env.JOB_NAME }}" + cp coverage.xml "coverage_reports/coverage.${{ env.JOB_NAME }}.xml" + + - name: Publish Coverage + if: matrix.task.coverage + uses: actions/upload-artifact@v2 + with: + name: coverage + path: coverage_reports/* + + coverage: + # Should match JOB_NAME below + name: ${{ matrix.task.name }} - ${{ matrix.os.name }} ${{ matrix.python.name }} + runs-on: ${{ matrix.os.runs-on }} + needs: + - test + strategy: + fail-fast: false + matrix: + include: + - os: + name: Linux + runs-on: ubuntu-latest + python: + name: CPython 3.8 + action: 3.8 + task: + name: Coverage + tox: check-coverage + coverage: false + download_coverage: true + + env: + # Should match name above + JOB_NAME: ${{ matrix.task.name }} - ${{ matrix.os.name }} ${{ matrix.python.name }} + TOXENV: ${{ matrix.task.tox }}${{ fromJSON('["", "-"]')[matrix.task.tox != null && matrix.python.tox != null] }}${{ matrix.python.tox }} + + steps: + - uses: actions/checkout@v2 + with: + fetch-depth: 0 + ref: ${{ github.event.pull_request.head.sha }} + + - name: Download package files + uses: actions/download-artifact@v2 + with: + name: dist + path: dist/ + + - name: Download Coverage + if: matrix.task.download_coverage + uses: actions/download-artifact@v2 + with: + name: coverage + path: coverage_reports + + - name: Set up ${{ matrix.python.name }} + uses: 
actions/setup-python@v2 + with: + python-version: ${{ matrix.python.action }} + + - name: Install dependencies + run: | + python -m pip install --upgrade pip setuptools wheel + pip install tox + + - name: Prepare tox environment + run: | + tox --notest --installpkg dist/*.whl + + - name: Runner info + uses: twisted/python-info-action@v1 + + - name: Tox info + uses: twisted/python-info-action@v1 + with: + python-path: .tox/${{ env.TOXENV }}/*/python + + - name: Run tox environment + env: + BASE_REF: ${{ fromJSON(format('[{0}, {1}]', toJSON(github.event.before), toJSON(format('origin/{0}', github.base_ref))))[github.base_ref != ''] }} + run: | + tox --skip-pkg-install -- --compare-branch="${BASE_REF}" + + - name: Coverage Processing + if: always() + run: | + mkdir all_coverage_report + cp .coverage "all_coverage_report/.coverage.all" + cp coverage.xml "all_coverage_report/coverage.all.xml" + + - name: Upload Coverage + if: always() + uses: actions/upload-artifact@v2 + with: + name: coverage + path: all_coverage_report/* + + all: + name: All + runs-on: ubuntu-latest + needs: + - build + - test + # TODO: make this required when we have a better testing situation + # - coverage + steps: + - name: This + shell: python + run: import this diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..ca379a55 --- /dev/null +++ b/.gitignore @@ -0,0 +1,5 @@ +__pycache__ +venv +.DS_Store +.vscode +src/plotman.egg-info diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 00000000..ab0c93c6 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,89 @@ +# Change Log + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [unreleased] +### Added +- `plotman export` command to output summaries from plot logs in `.csv` format. + ([#557](https://github.com/ericaltendorf/plotman/pull/557)) +- `--json` option for `plotman status`. + ([#549](https://github.com/ericaltendorf/plotman/pull/549)) +- If the tmp drive selected for a plot is also listed as a dst drive then plotman will use the same drive for both. + ([#643](https://github.com/ericaltendorf/plotman/pull/643)) +- `plotman prometheus` command to output status for consumption by [Prometheus](https://prometheus.io/). + ([#430](https://github.com/ericaltendorf/plotman/pull/430)) +- `plotman logs` command to print and tail plot logs by their plot ID. + ([#509](https://github.com/ericaltendorf/plotman/pull/509)) + +## [0.4.1] - 2021-06-11 +### Fixed +- Archival disk space check finds drives with multiple mount points again. + This fixes a regression introduced in v0.4.1. + ([#773](https://github.com/ericaltendorf/plotman/issues/773)) +- `plotman dirs` does not fail for every invocation. + `TypeError: dirs_report() missing 1 required positional argument: 'width'` + ([#778](https://github.com/ericaltendorf/plotman/issues/778)) + +## [0.4] - 2021-06-10 +### Fixed +- More accurately calculates expected size of plots. +- Archival requires only minimal extra space on target drive. + The required space is based on the size of the actual plot to be transferred. + Previously a 20% (~20GB) margin was required relative to a rough approximation of plot size. +- Identify more cases of chia plotting processes such as on NixOS. +- Avoid some more `NoSuchProcess` and `AccessDenied` errors when identifying plotting processes. 
+- Avoid crashing when parsing a plotting process log that fails to decode with `UnicodeDecodeError`.
+- Avoid crashing when a tmp file is removed while we are checking a job's tmp usage.
+- Windows is not yet supported, but plot and archive processes are now launched to be independent of the plotman process on Windows, as they already were on Linux.
+### Added
+- Configuration file is versioned.
+  The config for previous plotman versions has been retroactively defined to be version 0.
+  The new version is 1.
+  An error will be raised when you launch plotman with a configuration file whose version does not match the expected configuration version.
+  That error will include a link to the wiki to help understand the needed changes.
+  See [the wiki configuration page](https://github.com/ericaltendorf/plotman/wiki/Configuration#1-v04).
+- Archiving configuration has been reworked, offering both a simple builtin local archiving setup and arbitrary configuration of the disk space check and transfer operations.
+  See [the wiki archiving page](https://github.com/ericaltendorf/plotman/wiki/Archiving).
+- The `directories:` `dst:` section is optional.
+  If not specified, then generally the tmp drive for the plot will be used as dst.
+  If tmp2 is specified, then it will be used as dst.
+- Along with plot logs, there are now archive transfer logs and an overall plotman log.
+  This helps with diagnosing issues with both the archival disk space check and the archival transfers.
+  The paths are configurable under `logging:` via `plots:` (directory), `transfers:` (directory), and `application:` (file).
+- Added support for `-c`/`--pool_contract_address`.
+  Configurable as `plotting:` `pool_contract_address:`.
+- Interactive mode can be launched with plotting and archiving inactive.
+  This is available via the configuration file in `commands:` `interactive:` `autostart_plotting:` and `autostart_archiving:`.
+  They are also available on the command line as `--[no-]autostart-plotting` and `--[no-]autostart-archiving`.
+- Uses `i` to differentiate gigabytes from gibibytes: for example, `Gi` vs. `G`.
+
+## [0.3.1] - 2021-05-13
+Changes not documented.
+Bug fixes for v0.3.
+
+## [0.3] - 2021-05-12
+Changes not documented.
+
+## [0.2] - 2021-04-20
+Changes not documented.
+
+## [0.1.1] - 2021-02-07
+### Fixed
+- Find jobs more reliably by inspecting cmdline instead of "process name"
+- Checked-in config.yaml now conforms to code's expectations!
+### Added
+- Job progress histogram view in `interactive` mode
+- Ability to disable archival (by commenting out the config section)
+- Minor improvements to messages, titles, tables in interactive mode
+
+## [0.1.0] - 2021-01-31
+### Fixed
+- Fixed issue with prioritization of tmp dirs
+
+## [0.0.1] - 2021-01-30
+### Added
+- `.gitignore` and `CHANGELOG.md`
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 00000000..3ea2c2ce
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020 Eric Altendorf + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/LICENSE-chia-blockchain b/LICENSE-chia-blockchain new file mode 100644 index 00000000..ee81ae2a --- /dev/null +++ b/LICENSE-chia-blockchain @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2021 Chia Network + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/MAINTENANCE.md b/MAINTENANCE.md
new file mode 100644
index 00000000..b8b17c2d
--- /dev/null
+++ b/MAINTENANCE.md
@@ -0,0 +1,19 @@
+# Maintenance
+
+## Overview
+
+This document holds guidance on maintaining aspects of plotman.
+
+## The `chia plots create` CLI parsing code
+
+In [src/plotman/chia.py](src/plotman/chia.py) there is code copied from the `chia plots create` subcommand's CLI parser definition.
+When new versions of `chia-blockchain` are released, their interface code should be added to plotman.
+plotman commit [1b5db4e](https://github.com/ericaltendorf/plotman/commit/1b5db4e342b9ec1f7910663a453aec3a97ba51a6) provides an example of adding a new version.
+
+In many cases, copying code is a poor choice.
+It is believed to be appropriate in this case, since the chia code that plotman could import is not necessarily the code that is parsing the plotting process command lines anyway.
+The chia command could come from another Python environment, a system package, a `.dmg`, etc.
+This approach also offers the future potential of using the proper version of parsing for the specific plot process being inspected.
+Finally, this alleviates dealing with the dependency on the `chia-blockchain` package.
+In general, using dependencies is good; this seems to be an exceptional case.
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 00000000..08d64bbe
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,11 @@
+include CHANGELOG.md
+include LICENSE*
+include README.md
+include *.md
+include VERSION
+include mypy.ini
+include tox.ini
+include .coveragerc
+recursive-include src *.py
+recursive-include src/plotman/_tests/resources *
+recursive-include src/plotman/resources *
diff --git a/README.md b/README.md
new file mode 100644
index 00000000..09decd46
--- /dev/null
+++ b/README.md
@@ -0,0 +1,243 @@
+# `plotman`: a Chia plotting manager
+
+This is a tool for managing [Chia](https://github.com/Chia-Network/chia-blockchain)
+plotting operations. The tool runs on the plotting machine and provides
+the following functionality:
+
+- Automatic spawning of new plotting jobs, possibly overlapping ("staggered")
+  on multiple temp directories, rate-limited globally and by per-temp-dir
+  limits.
+
+- Rsync'ing of newly generated plots to a remote host (a farmer/harvester),
+  called "archiving".
+
+- Monitoring of ongoing plotting and archiving jobs, progress, resources used,
+  temp files, etc.
+
+- Control of ongoing plotting jobs (suspend, resume, plus kill and clean up
+  temp files).
+
+- Both an interactive live dashboard mode as well as command line mode tools.
+
+- (very alpha) Analyzing performance statistics of past jobs, to aggregate on
+  various plotting parameters or temp dir type.
+
+Plotman is designed for the following configuration:
+
+- A plotting machine with an array of `tmp` dirs, a single `tmp2` dir, and an
+  array of `dst` dirs to which the plot jobs plot. The `dst` dirs serve as a
+  temporary buffer space for generated plots.
+
+- A farming machine with a large number of drives, made accessible via an
+  `rsyncd` module, and to be entirely populated with plots. These are known as
+  the `archive` directories.
+
+- Plot jobs are run with STDOUT/STDERR redirected to a log file in a configured
+  directory. This allows analysis of progress (plot phase) as well as timing
+  (e.g. for analyzing performance).
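+
+For a concrete sense of how such a layout is expressed, here is a minimal
+sketch (not part of plotman itself) that mirrors how the test suite in this
+patch constructs the directory configuration; the paths are hypothetical:
+
+```python
+from plotman import configuration
+
+# Hypothetical layout: two tmp drives and two dst buffer drives.
+directories = configuration.Directories(
+    tmp=["/mnt/tmp/00", "/mnt/tmp/01"],
+    dst=["/home/chia/chia/plots/000", "/home/chia/chia/plots/001"],
+)
+
+# The dst dirs are the buffer that archiving later drains.
+assert directories.get_dst_directories() == [
+    "/home/chia/chia/plots/000",
+    "/home/chia/chia/plots/001",
+]
+
+# If dst is omitted entirely, plots land in (and are archived from) tmp.
+assert configuration.Directories(tmp=["/mnt/tmp/00"]).dst_is_tmp()
+```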
+
+## Functionality
+
+Plotman tools are stateless. Rather than keep an internal record of what jobs
+have been started, Plotman relies on the process tables, open files, and
+logfiles of plot jobs to understand "what's going on". This means the tools
+can be stopped and started, even from a different login session, without loss
+of information. It also means Plotman can see and manage jobs started manually
+or by other tools, as long as their STDOUT/STDERR is redirected to a file in a
+known logfile directory. (Note: the tool relies on reading the chia plot
+command line arguments and the format of the plot tool output. Changes in
+those may break this tool.)
+
+Plot scheduling is done by waiting for a certain amount of wall time since the
+last job was started, finding the best (e.g. least recently used) `tmp` dir for
+plotting, and ensuring that the jobs already running there have progressed to
+at least a certain point (e.g., phase 2, subphase 5).
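+
+The phase-milestone half of that decision is exercised directly by the test
+suite included in this patch; condensed, with hypothetical phase lists:
+
+```python
+from plotman import configuration, job, manager
+
+sched_cfg = configuration.Scheduling(
+    global_max_jobs=1,
+    global_stagger_m=2,            # minutes between global job starts
+    polling_time_s=2,
+    tmpdir_stagger_phase_major=3,  # existing jobs must reach phase 3:0...
+    tmpdir_stagger_phase_minor=0,  # ...before this tmp dir takes another
+    tmpdir_max_jobs=3,
+)
+dir_cfg = configuration.Directories(
+    tmp=["/var/tmp", "/tmp"],
+    dst=["/mnt/dst/00", "/mnt/dst/01", "/mnt/dst/03"],
+)
+
+# Both running jobs are past the 3:0 milestone: a new job may start here.
+phases = job.Phase.list_from_tuples([(3, 8), (4, 1)])
+assert manager.phases_permit_new_job(phases, '/mnt/tmp/00', sched_cfg, dir_cfg)
+
+# One job is still in phase 2: this tmp dir is not ready for another.
+phases = job.Phase.list_from_tuples([(2, 3), (4, 1)])
+assert not manager.phases_permit_new_job(phases, '/mnt/tmp/00', sched_cfg, dir_cfg)
+```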
+
+Plots are output to the `dst` dirs, which serve as a temporary buffer until they
+are rsync'd ("archived") to the farmer/harvester. The archiver does several
+things to attempt to avoid concurrent IO. First, it only allows one rsync
+process at a time (more sophisticated scheduling could remove this
+restriction, but it's nontrivial). Second, it inspects the pipeline of plot
+jobs to see which `dst` dirs are about to have plots written to them. This
+is balanced against how full the `dst` drives are in a priority scheme.
+
+It is, obviously, necessary that your rsync bandwidth exceeds your plotting
+bandwidth. Given this, in normal operation, the `dst` dirs remain empty until
+a plot is finished, after which it is shortly picked up by the archive job.
+However, the decoupling provided by using `dst` drives as a buffer means that
+should the farmer/harvester or the network become unavailable, plotting
+continues uninterrupted.
+
+## Screenshot Overview
+
+```
+Plotman 19:01:06 (refresh 9s/20s) | Plotting: stagger (1623s/1800s) Archival: active pid 1599918
+Prefixes: tmp=/mnt/tmp dst=/home/chia/chia/plots archive=/plots (remote)
+
+  # plot id      k tmp dst   wall phase  tmp      pid stat    mem  user  sys    io
+  0 6b4e7375... 32  03 001   0:27   1:2  71G  1590196  SLP   5.5G  0:52 0:02    0s
+  1 9ab50d0e... 32  02 005   1:00   1:4 199G  1539209  SLP   5.5G  3:50 0:09    0s
+  2 018cf561... 32  01 000   1:32   1:5 224G  1530045  SLP   5.5G  4:46 0:11    2s
+  3 f771de9c... 32  00 004   2:03   1:5 241G  1524772  SLP   5.5G  5:43 0:14    2s
+...
+ 16 58045bef... 32  10 002  11:23   3:5 193G  1381622  RUN   5.4G 15:02 0:53  0:02
+ 17 8134a2dd... 32  11 003  11:55   3:6 148G  1372206  RUN   5.4G 15:27 0:57  0:03
+ 18 50165422... 32  08 001  12:43   3:6 102G  1357782  RUN   5.4G 16:14 1:00  0:03
+ 19 100df84f... 32  09 005  13:19   4:0    0  1347430  DSK 705.9M 16:44 1:04  0:06
+
+tmp ready phases     tmp ready phases      dst plots GB free phases              priority
+ 00   --  1:5, 3:4    06   --  2:4         000   1    1890   1:5, 2:2, 3:4           47
+ 01   --  1:5, 3:4    07   --  2:2         001   0    1998   1:2, 1:7, 3:2, 3:6      34
+ 02   --  1:4, 3:3    08   --  1:7, 3:6    002   0    1967   1:6, 2:5, 3:5           42
+ 03   --  1:2, 3:2    09   --  2:1, 4:0    003   0    1998   1:6, 3:1, 3:6           34
+ 04   OK  3:1         10   --  1:6, 3:5    004   0    1998   1:5, 2:4, 3:4           46
+ 05   OK  2:5         11   --  1:6, 3:6    005   0    1955   1:4, 2:1, 3:3, 4:0      18
+
+Archive dirs free space
+000: 94GB | 005: 94GB | 012: 24GB | 017: 99GB | 022: 94GB | 027: 94GB   | 032: 9998GB | 037: 9998GB
+001: 94GB | 006: 93GB | 013: 25GB | 018: 94GB | 023: 94GB | 028: 94GB   | 033: 9998GB |
+002: 93GB | 009: 25GB | 014: 93GB | 019: 31GB | 024: 94GB | 029: 7777GB | 034: 9998GB |
+003: 94GB | 010: 25GB | 015: 94GB | 020: 47GB | 025: 94GB | 030: 9998GB | 035: 9998GB |
+004: 94GB | 011: 25GB | 016: 99GB | 021: 93GB | 026: 94GB | 031: 9998GB | 036: 9998GB |
+
+Log:
+01-02 18:33:53 Starting plot job: chia plots create -k 32 -r 8 -u 128 -b 4580 -t /mnt/tmp/03 -2 /mnt/tmp/a -d /home/chi
+01-02 18:33:53 Starting archive: rsync --bwlimit=100000 --remove-source-files -P /home/chia/chia/plots/004/plot-k32-202
+01-02 18:52:40 Starting archive: rsync --bwlimit=100000 --remove-source-files -P /home/chia/chia/plots/000/plot-k32-202
+```
+
+The screenshot shows some of the main features of Plotman.
+
+The first line shows the status. The plotting status shows whether we just
+started a plot, or, if not, why not (e.g., stagger time, tmp directories being
+ready, etc.; in this case, the 1800s stagger between plots has not been reached
+yet). Archival status says whether we are currently archiving (and provides
+the `rsync` pid) or whether there are no plots available in the `dst` drives to
+archive.
+
+The second line provides a key to some directory abbreviations used throughout.
+For `tmp` and `dst` directories, we assume they have a common prefix, which is
+computed and indicated here, after which they can be referred to (in context)
+by their unique suffix. For example, if we have `tmp` dirs `/mnt/tmp/00`,
+`/mnt/tmp/01`, `/mnt/tmp/02`, etc., we show `/mnt/tmp` as the prefix here and
+can then talk about `tmp` dirs `00` or `01` etc. The `archive` directories are
+the same except that these are paths on a remote host and accessed via an
+`rsyncd` module (see `src/plotman/resources/plotman.yaml` for details).
+
+The next table shows information about the active plotting jobs. It is
+abbreviated to show the most and least recently started jobs (the full list is
+available via the command line mode). It shows various information about the
+plot jobs, including the plot ID (first 8 chars), the directories used,
+walltime, the current plot phase and subphase, space used on the `tmp` drive,
+pid, etc.
+
+The next tables are a bit hard to read; there is actually a `tmp` table on the
+left, split into two tables for rendering purposes, and a `dst` table on the
+right. The `tmp` tables show the phases of the plotting jobs using them, and
+whether or not they're ready to take a new plot job. The `dst` table shows how
+many plots have accumulated, how much free space is left, the phases of jobs
+that are destined to write to them, and finally, the priority computed for the
+archive job to move the plots away (see the example after this section).
+
+The last table simply shows free space of drives on the remote
+harvester/farmer.
+
+Finally, the last section shows a log of actions performed -- namely, plot and
+archive jobs initiated. This is the one part of the interactive tool which is
+stateful. There is no permanent record of these executed command lines, so if
+you start a new interactive plotman session, this log is empty.
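+
+The archive priority shown in the `dst` table comes from
+`plotman.archive.compute_priority`. One property pinned down by this patch's
+test suite: with the other two inputs held equal, a `dst` dir whose inbound
+plot is earlier in phase 3 is ranked higher than one whose inbound plot is
+nearly done:
+
+```python
+from plotman import archive, job
+
+assert (archive.compute_priority(job.Phase(major=3, minor=1), 1000, 10) >
+        archive.compute_priority(job.Phase(major=3, minor=6), 1000, 10))
+```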
+
+## `plotman` commands
+To get a complete list of all available commands, run:
+```shell
+plotman -h
+```
+
+You can also get help about a specific command by appending `-h` to it, like
+```shell
+plotman interactive -h
+```
+
+## Running `plotman` as a daemon
+> _PS: this section assumes that you have already configured `plotman.yaml`._
+
+By default the command `plotman plot` will start the plotting job and continue to run in the foreground for as long as you keep the terminal window open. If you want to have it running continuously, try the following:
+```shell
+nohup plotman plot >> ~/plotman.log 2>&1 &
+```
+
+## Limitations and Issues
+
+The system is tested on Linux only. Plotman should be generalizable to other
+platforms, but this is not done yet. Some of the issues around making calls
+out to command line programs (e.g., running `df` over `ssh` to obtain the free
+space on the remote archive directories) are very Linux-y.
+
+The interactive mode uses the `curses` library ... poorly. Keypresses are
+not received, screen resizing does not work, and the minimum terminal size
+is pretty big.
+
+Plotman assumes all plots are k32s. Again, this is just an unimplemented
+generalization.
+
+Many features are inconsistently supported between the "interactive" mode and
+the command line mode.
+
+There are many bugs and TODOs.
+
+Plotman always looks for the `plotman.yaml` file at an OS-dependent default
+location. To generate a default `plotman.yaml`, run:
+```shell
+> plotman config generate
+```
+
+To display the current location of your `plotman.yaml` file and check whether it exists, run:
+```shell
+> plotman config path
+```
+
+([See also](https://github.com/ericaltendorf/plotman/pull/61#issuecomment-812967363)).
+
+## Installation
+
+Installation for Linux and macOS:
+
+1. Plotman assumes that a functioning [Chia](https://github.com/Chia-Network/chia-blockchain)
+   installation is present on the system.
+   - virtual environment (Linux, macOS): Activate your `chia` environment by typing
+     `source /path/to/your/chia/install/activate`.
+   - dmg (macOS): Follow [these instructions](https://github.com/Chia-Network/chia-blockchain/wiki/CLI-Commands-Reference#mac)
+     to add the `chia` binary to the `PATH`.
+2. Then, install Plotman using the following command:
+   ```shell
+   > pip install --force-reinstall git+https://github.com/ericaltendorf/plotman@main
+   ```
+3. Plotman will look for `plotman.yaml` at an OS-dependent default location.
+   To create a default `plotman.yaml` and display its location, run the
+   following command:
+   ```shell
+   > plotman config generate
+   ```
+   The default configuration file used as a starting point is located [here](./src/plotman/resources/plotman.yaml).
+4. That's it! You can now verify the installation by running `plotman version`.
+   Run `plotman --help` to learn about the available commands.
+
+*Note:* If you see `ModuleNotFoundError: No module named 'readline'` when using `plotman` on [RHEL-based Linux](https://github.com/ericaltendorf/plotman/issues/195) after installing via [chia's guide](https://github.com/Chia-Network/chia-blockchain/wiki/INSTALL#centos--red-hat--fedora), install `readline-devel` and then reinstall chia (starting from the step that compiles Python) in a fresh build environment, or consider using a project like `pyenv`.
+ +## Basic Usage: + +1. Install + +2. Generate initial config + +3. Configure (default location can be found with `plotman config path`). Options explained in the default config file (step 2) + +4. Create log directory specified in `directories: { log: "" }` + +5. Start plotman: `plotman plot` or `plotman interactive` + +6. Check status: `plotman status` + +### Development note: + +If you are forking Plotman, simply replace the installation step with `pip install --editable .[dev]` from the project root directory to install *your* version of plotman with test and development extras. diff --git a/VERSION b/VERSION new file mode 100644 index 00000000..0fac0690 --- /dev/null +++ b/VERSION @@ -0,0 +1 @@ +0.4.1+dev diff --git a/mypy.ini b/mypy.ini new file mode 100644 index 00000000..2b2c6b0f --- /dev/null +++ b/mypy.ini @@ -0,0 +1,26 @@ +[mypy] +show_error_codes = true +strict = true +mypy_path = src/ + +[mypy-appdirs] +ignore_missing_imports = true + +[mypy-click] +ignore_missing_imports = true + +[mypy-pendulum] +# TODO: https://github.com/sdispater/pendulum/pull/551 +implicit_reexport = true + +[mypy-psutil] +ignore_missing_imports = true + +[mypy-pyfakefs] +ignore_missing_imports = true + +[mypy-texttable] +ignore_missing_imports = true + +[mypy-yaml] +ignore_missing_imports = true diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..9787c3bd --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,3 @@ +[build-system] +requires = ["setuptools", "wheel"] +build-backend = "setuptools.build_meta" diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 00000000..f922b21e --- /dev/null +++ b/setup.cfg @@ -0,0 +1,83 @@ +[metadata] +name = plotman +version = file: VERSION +author = Eric Altendorf +home-page = https://github.com/ericaltendorf/plotman +description = Chia plotting manager +long-description = file: README.md +long_description_content_type = text/markdown +license = Apache 2.0 +license-file = LICENSE +python_requires = >=3.7 +keywords = chia, blockchain, automation, process management +classifiers = + Development Status :: 3 - Alpha + Environment :: Console :: Curses + Intended Audience :: Developers + Intended Audience :: System Administrators + Intended Audience :: Information Technology + License :: OSI Approved :: Apache Software License + Natural Language :: English + Operating System :: POSIX :: Linux + Operating System :: MacOS :: MacOS X + Programming Language :: Python :: 3 + Programming Language :: Python :: Implementation :: CPython + Programming Language :: Python :: Implementation :: PyPy + Topic :: System :: Monitoring + Topic :: System :: Systems Administration + Topic :: Utilities +project_urls = + Bug Tracker = https://github.com/ericaltendorf/plotman/issues + Changelog = https://github.com/ericaltendorf/plotman/blob/main/CHANGELOG.md + +[options] +include_package_data = True +package_dir= + =src +packages=find: +install_requires = + appdirs ~= 1.4 + attrs == 21.2 + click ~= 7.1 + desert == 2020.11.18 + marshmallow ~= 3.12 + pendulum ~= 2.1 + psutil ~= 5.8 + pyyaml ~= 5.4 + texttable ~= 1.6 + typing-extensions ~= 3.10 + +[options.packages.find] +where=src + +[options.entry_points] +console_scripts = + plotman = plotman.plotman:main + +[options.extras_require] +coverage = + coverage + diff-cover +dev = + %(test)s + isort +test = + %(coverage)s + pytest + pytest-cov + pyfakefs +checks = + check-manifest ~= 0.46 + mypy == 0.902 + types-pkg_resources ~= 0.1.2 + %(test)s + +[options.data_files] +config = src/plotman/resources/plotman.yaml +bin = 
util/listlogs + +[isort] +multi_line_output=3 +include_trailing_comma=True +force_grid_wrap=0 +line_length=88 diff --git a/setup.py b/setup.py new file mode 100644 index 00000000..056ba45d --- /dev/null +++ b/setup.py @@ -0,0 +1,4 @@ +import setuptools + + +setuptools.setup() diff --git a/src/plotman/__init__.py b/src/plotman/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/plotman/__main__.py b/src/plotman/__main__.py new file mode 100644 index 00000000..99f0472d --- /dev/null +++ b/src/plotman/__main__.py @@ -0,0 +1,10 @@ +#!/usr/bin/env python3 + +from plotman import plotman + + +"""Plotman module launcher. +This is a shim that allows you to run plotman via + python3 -m plotman +""" +plotman.main() diff --git a/src/plotman/_tests/__init__.py b/src/plotman/_tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/plotman/_tests/archive_test.py b/src/plotman/_tests/archive_test.py new file mode 100644 index 00000000..ddd05e75 --- /dev/null +++ b/src/plotman/_tests/archive_test.py @@ -0,0 +1,6 @@ +from plotman import archive, job + + +def test_compute_priority() -> None: + assert (archive.compute_priority( job.Phase(major=3, minor=1), 1000, 10) > + archive.compute_priority( job.Phase(major=3, minor=6), 1000, 10) ) diff --git a/src/plotman/_tests/configuration_test.py b/src/plotman/_tests/configuration_test.py new file mode 100644 index 00000000..db4aa41e --- /dev/null +++ b/src/plotman/_tests/configuration_test.py @@ -0,0 +1,104 @@ +"""Tests for plotman/configuration.py""" +import importlib.resources + +import pytest +import yaml + +from plotman import configuration +from plotman import resources as plotman_resources + + +@pytest.fixture(name='config_text') +def config_text_fixture() -> str: + return importlib.resources.read_text(plotman_resources, "plotman.yaml") + + +@pytest.fixture(name='target_definitions_text') +def target_definitions_text_fixture() -> str: + return importlib.resources.read_text( + plotman_resources, "target_definitions.yaml", + ) + + +def test_get_validated_configs__default(config_text: str, target_definitions_text: str) -> None: + """Check that get_validated_configs() works with default/example plotman.yaml file.""" + res = configuration.get_validated_configs(config_text, '', target_definitions_text) + assert isinstance(res, configuration.PlotmanConfig) + +def test_get_validated_configs__malformed(config_text: str, target_definitions_text: str) -> None: + """Check that get_validated_configs() raises exception with invalid plotman.yaml contents.""" + loaded_yaml = yaml.load(config_text, Loader=yaml.SafeLoader) + + # Purposefully malform the contents of loaded_yaml by changing tmp from List[str] --> str + loaded_yaml["directories"]["tmp"] = "/mnt/tmp/00" + malformed_config_text = yaml.dump(loaded_yaml, Dumper=yaml.SafeDumper) + + with pytest.raises(configuration.ConfigurationException) as exc_info: + configuration.get_validated_configs(malformed_config_text, '/the_path', target_definitions_text) + + assert exc_info.value.args[0] == f"Config file at: '/the_path' is malformed" + + +def test_get_validated_configs__missing() -> None: + """Check that get_validated_configs() raises exception when plotman.yaml does not exist.""" + with pytest.raises(configuration.ConfigurationException) as exc_info: + configuration.read_configuration_text('/invalid_path') + + assert exc_info.value.args[0] == ( + f"No 'plotman.yaml' file exists at expected location: '/invalid_path'. 
To generate " + f"default config file, run: 'plotman config generate'" + ) + + +def test_loads_without_user_interface(config_text: str, target_definitions_text: str) -> None: + loaded_yaml = yaml.load(config_text, Loader=yaml.SafeLoader) + + del loaded_yaml["user_interface"] + + stripped_config_text = yaml.dump(loaded_yaml, Dumper=yaml.SafeDumper) + + reloaded_yaml = configuration.get_validated_configs(stripped_config_text, '', target_definitions_text) + + assert reloaded_yaml.user_interface == configuration.UserInterface() + + +def test_loads_without_user_archiving(config_text: str, target_definitions_text: str) -> None: + loaded_yaml = yaml.load(config_text, Loader=yaml.SafeLoader) + + del loaded_yaml["archiving"] + + stripped_config_text = yaml.dump(loaded_yaml, Dumper=yaml.SafeDumper) + + reloaded_yaml = configuration.get_validated_configs(stripped_config_text, '', target_definitions_text) + + assert reloaded_yaml.archiving is None + + +def test_get_dst_directories_gets_dst() -> None: + tmp = ['/tmp'] + dst = ['/dst0', '/dst1'] + directories = configuration.Directories(tmp=tmp, dst=dst) + + assert directories.get_dst_directories() == dst + + +def test_get_dst_directories_gets_tmp() -> None: + tmp = ['/tmp'] + directories = configuration.Directories(tmp=tmp) + + assert directories.get_dst_directories() == tmp + + +def test_dst_is_dst() -> None: + tmp = ['/tmp'] + dst = ['/dst0', '/dst1'] + directories = configuration.Directories(tmp=tmp, dst=dst) + + assert not directories.dst_is_tmp() + + +def test_dst_is_tmp() -> None: + tmp = ['/tmp'] + directories = configuration.Directories(tmp=tmp) + + assert directories.dst_is_tmp() diff --git a/src/plotman/_tests/job_test.py b/src/plotman/_tests/job_test.py new file mode 100644 index 00000000..84269f40 --- /dev/null +++ b/src/plotman/_tests/job_test.py @@ -0,0 +1,147 @@ +import contextlib +import datetime +import locale +import importlib.resources +import os +import pathlib +import typing + +import pendulum +import pytest +from plotman import job +from plotman._tests import resources + + +class FauxJobWithLogfile: + # plotman.job.Job does too much in its .__init_() so we have this to let us + # test its .init_from_logfile(). + + start_time: pendulum.DateTime + + def __init__(self, logfile_path: str) -> None: + self.logfile = logfile_path + + def update_from_logfile(self) -> None: + pass + + +@pytest.fixture(name='logfile_path') +def logfile_fixture(tmp_path: pathlib.Path) -> pathlib.Path: + log_name = '2021-04-04T19_00_47.681088-0400.log' + log_contents = importlib.resources.read_binary(resources, log_name) + log_file_path = tmp_path.joinpath(log_name) + log_file_path.write_bytes(log_contents) + + return log_file_path + + +@contextlib.contextmanager +def set_locale(name: str) -> typing.Generator[str, None, None]: + # This is terrible and not thread safe. 
+ + original = locale.setlocale(locale.LC_ALL) + + try: + yield locale.setlocale(locale.LC_ALL, name) + finally: + locale.setlocale(locale.LC_ALL, original) + +with set_locale('C'): + log_file_time = datetime.datetime.strptime('Sun Apr 4 19:00:50 2021', '%a %b %d %H:%M:%S %Y') + +@pytest.mark.parametrize( + argnames=['locale_name'], + argvalues=[['C'], ['en_US.UTF-8'], ['de_DE.UTF-8']], +) +def test_job_parses_time_with_non_english_locale(logfile_path: pathlib.Path, locale_name: str) -> None: + faux_job_with_logfile = FauxJobWithLogfile(logfile_path=os.fspath(logfile_path)) + + with set_locale(locale_name): + job.Job.init_from_logfile(self=faux_job_with_logfile) # type: ignore[arg-type] + + assert faux_job_with_logfile.start_time == log_file_time + + +@pytest.mark.parametrize( + argnames=['arguments'], + argvalues=[ + [['-h']], + [['--help']], + [['-k', '32']], + [['-k32']], + [['-k', '32', '--help']], + ], + ids=str, +) +def test_chia_plots_create_parsing_does_not_fail(arguments: typing.List[str]) -> None: + job.parse_chia_plots_create_command_line( + command_line=['python', 'chia', 'plots', 'create', *arguments], + ) + + +@pytest.mark.parametrize( + argnames=['arguments'], + argvalues=[ + [['-h']], + [['--help']], + [['-k', '32', '--help']], + ], + ids=str, +) +def test_chia_plots_create_parsing_detects_help(arguments: typing.List[str]) -> None: + parsed = job.parse_chia_plots_create_command_line( + command_line=['python', 'chia', 'plots', 'create', *arguments], + ) + + assert parsed.help + + +@pytest.mark.parametrize( + argnames=['arguments'], + argvalues=[ + [[]], + [['-k32']], + [['-k', '32']], + ], + ids=str, +) +def test_chia_plots_create_parsing_detects_not_help(arguments: typing.List[str]) -> None: + parsed = job.parse_chia_plots_create_command_line( + command_line=['python', 'chia', 'plots', 'create', *arguments], + ) + + assert not parsed.help + + +@pytest.mark.parametrize( + argnames=['arguments'], + argvalues=[ + [[]], + [['-k32']], + [['-k', '32']], + [['--size', '32']], + ], + ids=str, +) +def test_chia_plots_create_parsing_handles_argument_forms(arguments: typing.List[str]) -> None: + parsed = job.parse_chia_plots_create_command_line( + command_line=['python', 'chia', 'plots', 'create', *arguments], + ) + + assert parsed.parameters['size'] == 32 + + +@pytest.mark.parametrize( + argnames=['arguments'], + argvalues=[ + [['--size32']], + [['--not-an-actual-option']], + ], + ids=str, +) +def test_chia_plots_create_parsing_identifies_errors(arguments: typing.List[str]) -> None: + parsed = job.parse_chia_plots_create_command_line( + command_line=['python', 'chia', 'plots', 'create', *arguments], + ) + + assert parsed.error is not None diff --git a/src/plotman/_tests/log_parser_test.py b/src/plotman/_tests/log_parser_test.py new file mode 100644 index 00000000..4d9f6db7 --- /dev/null +++ b/src/plotman/_tests/log_parser_test.py @@ -0,0 +1,59 @@ +import importlib.resources + +from plotman._tests import resources +from plotman.log_parser import PlotLogParser +import plotman.job +import plotman.plotinfo + +example_info = plotman.plotinfo.PlotInfo( + started_at=plotman.job.parse_chia_plot_time(s="Sun Apr 4 19:00:50 2021"), + plot_id="3eb8a37981de1cc76187a36ed947ab4307943cf92967a7e166841186c7899e24", + buckets=128, + threads=4, + buffer=4000, + plot_size=32, + tmp_dir1="/farm/yards/901", + tmp_dir2="/farm/yards/901", + phase1_duration_raw=17571.981, + phase2_duration_raw=6911.621, + phase3_duration_raw=14537.188, + phase4_duration_raw=924.288, + total_time_raw=39945.080, + 
copy_time_raw=501.696, + filename="/farm/wagons/801/plot-k32-2021-04-04-19-00-3eb8a37981de1cc76187a36ed947ab4307943cf92967a7e166841186c7899e24.plot", +) + + +def test_should_correctly_parse() -> None: + with importlib.resources.open_text( + resources, + "2021-04-04T19_00_47.681088-0400.log", + ) as file: + parser = PlotLogParser() + info = parser.parse(file) + + assert info == example_info + + assert info.phase1_duration == 17572 + assert info.phase1_duration_minutes == 293 + assert info.phase1_duration_hours == 4.88 + + assert info.phase2_duration == 6912 + assert info.phase2_duration_minutes == 115 + assert info.phase2_duration_hours == 1.92 + + assert info.phase3_duration == 14537 + assert info.phase3_duration_minutes == 242 + assert info.phase3_duration_hours == 4.04 + + assert info.phase4_duration == 924 + assert info.phase4_duration_minutes == 15 + assert info.phase4_duration_hours == 0.26 + + assert info.total_time == 39945 + assert info.total_time_minutes == 666 + assert info.total_time_hours == 11.10 + + assert info.copy_time == 502 + assert info.copy_time_minutes == 8 + assert info.copy_time_hours == 0.14 diff --git a/src/plotman/_tests/manager_test.py b/src/plotman/_tests/manager_test.py new file mode 100644 index 00000000..0c0b752e --- /dev/null +++ b/src/plotman/_tests/manager_test.py @@ -0,0 +1,100 @@ +import typing +# TODO: migrate away from unittest patch +from unittest.mock import patch + +import pytest + +from plotman import configuration, job, manager + + +@pytest.fixture +def sched_cfg() -> configuration.Scheduling: + return configuration.Scheduling( + global_max_jobs=1, + global_stagger_m=2, + polling_time_s=2, + tmpdir_stagger_phase_major=3, + tmpdir_stagger_phase_minor=0, + tmpdir_max_jobs=3 + ) + +@pytest.fixture +def dir_cfg() -> configuration.Directories: + return configuration.Directories( + tmp=["/var/tmp", "/tmp"], + dst=["/mnt/dst/00", "/mnt/dst/01", "/mnt/dst/03"], + tmp_overrides={"/mnt/tmp/04": configuration.TmpOverrides(tmpdir_max_jobs=4)} + ) + +def test_permit_new_job_post_milestone(sched_cfg: configuration.Scheduling, dir_cfg: configuration.Directories) -> None: + phases = job.Phase.list_from_tuples([ (3, 8), (4, 1) ]) + assert manager.phases_permit_new_job( + phases, '/mnt/tmp/00', sched_cfg, dir_cfg) + +def test_permit_new_job_pre_milestone(sched_cfg: configuration.Scheduling, dir_cfg: configuration.Directories) -> None: + phases = job.Phase.list_from_tuples([ (2, 3), (4, 1) ]) + assert not manager.phases_permit_new_job( + phases, '/mnt/tmp/00', sched_cfg, dir_cfg) + +def test_permit_new_job_too_many_jobs(sched_cfg: configuration.Scheduling, dir_cfg: configuration.Directories) -> None: + phases = job.Phase.list_from_tuples([ (3, 1), (3, 2), (3, 3) ]) + assert not manager.phases_permit_new_job( + phases, '/mnt/tmp/00', sched_cfg, dir_cfg) + +def test_permit_new_job_too_many_jobs_zerophase(sched_cfg: configuration.Scheduling, dir_cfg: configuration.Directories) -> None: + phases = job.Phase.list_from_tuples([ (3, 0), (3, 1), (3, 3) ]) + assert not manager.phases_permit_new_job( + phases, '/mnt/tmp/00', sched_cfg, dir_cfg) + +def test_permit_new_job_too_many_jobs_nonephase(sched_cfg: configuration.Scheduling, dir_cfg: configuration.Directories) -> None: + phases = job.Phase.list_from_tuples([ (None, None), (3, 1), (3, 3) ]) + assert manager.phases_permit_new_job( + phases, '/mnt/tmp/00', sched_cfg, dir_cfg) + +def test_permit_new_job_override_tmp_dir(sched_cfg: configuration.Scheduling, dir_cfg: configuration.Directories) -> None: + phases = 
job.Phase.list_from_tuples([ (3, 1), (3, 2), (3, 3) ]) + assert manager.phases_permit_new_job( + phases, '/mnt/tmp/04', sched_cfg, dir_cfg) + phases = job.Phase.list_from_tuples([ (3, 1), (3, 2), (3, 3), (3, 6) ]) + assert not manager.phases_permit_new_job( + phases, '/mnt/tmp/04', sched_cfg, + dir_cfg) + +@patch('plotman.job.Job') +def job_w_tmpdir_phase(tmpdir: str, phase: job.Phase, MockJob: typing.Any) -> typing.Any: + j = MockJob() + j.progress.return_value = phase + j.tmpdir = tmpdir + return j + +@patch('plotman.job.Job') +def job_w_dstdir_phase(dstdir: str, phase: job.Phase, MockJob: typing.Any) -> typing.Any: + j = MockJob() + j.progress.return_value = phase + j.dstdir = dstdir + return j + +def test_dstdirs_to_furthest_phase() -> None: + all_jobs = [ job_w_dstdir_phase('/plots1', job.Phase(1, 5)), + job_w_dstdir_phase('/plots2', job.Phase(1, 1)), + job_w_dstdir_phase('/plots2', job.Phase(3, 1)), + job_w_dstdir_phase('/plots2', job.Phase(2, 1)), + job_w_dstdir_phase('/plots3', job.Phase(4, 1)) ] + + assert (manager.dstdirs_to_furthest_phase(all_jobs) == + { '/plots1' : job.Phase(1, 5), + '/plots2' : job.Phase(3, 1), + '/plots3' : job.Phase(4, 1) } ) + + +def test_dstdirs_to_youngest_phase() -> None: + all_jobs = [ job_w_dstdir_phase('/plots1', job.Phase(1, 5)), + job_w_dstdir_phase('/plots2', job.Phase(1, 1)), + job_w_dstdir_phase('/plots2', job.Phase(3, 1)), + job_w_dstdir_phase('/plots2', job.Phase(2, 1)), + job_w_dstdir_phase('/plots3', job.Phase(4, 1)) ] + + assert (manager.dstdirs_to_youngest_phase(all_jobs) == + { '/plots1' : job.Phase(1, 5), + '/plots2' : job.Phase(1, 1), + '/plots3' : job.Phase(4, 1) } ) diff --git a/src/plotman/_tests/plot_util_test.py b/src/plotman/_tests/plot_util_test.py new file mode 100644 index 00000000..4a9a8c0b --- /dev/null +++ b/src/plotman/_tests/plot_util_test.py @@ -0,0 +1,66 @@ +import os + +import pyfakefs + +from plotman import plot_util +from plotman.plot_util import GB + + +def test_human_format() -> None: + assert (plot_util.human_format(3442000000, 0) == '3G') + assert (plot_util.human_format(3542000, 2) == '3.54M') + assert (plot_util.human_format(354, 0) == '354') + assert (plot_util.human_format(354, 0, True) == '354') + assert (plot_util.human_format(354, 2) == '354.00') + assert (plot_util.human_format(422399296143, 2) == '422.40G') + assert (plot_util.human_format(422399296143, 2, True) == '393.39Gi') + +def test_time_format() -> None: + assert (plot_util.time_format(34) == '34s') + assert (plot_util.time_format(59) == '59s') + assert (plot_util.time_format(60) == '0:01') + assert (plot_util.time_format(119) == '0:01') + assert (plot_util.time_format(120) == '0:02') + assert (plot_util.time_format(3694) == '1:01') + +def test_split_path_prefix() -> None: + assert (plot_util.split_path_prefix( [] ) == + ('', []) ) + assert (plot_util.split_path_prefix([ '/a/0', '/b/1', '/c/2' ]) == + ('', ['/a/0', '/b/1', '/c/2']) ) + assert ( plot_util.split_path_prefix([ '/a/b/0', '/a/b/1', '/a/b/2' ]) == + ('/a/b', ['0', '1', '2']) ) + +def test_columns() -> None: + assert (plot_util.column_wrap(list(range(8)), 3, filler='--') == + [ [ 0, 3, 6 ], + [ 1, 4, 7 ], + [ 2, 5, '--'] ] ) + assert (plot_util.column_wrap(list(range(9)), 3, filler='--') == + [ [ 0, 3, 6 ], + [ 1, 4, 7 ], + [ 2, 5, 8 ] ] ) + assert (plot_util.column_wrap(list(range(3)), 1, filler='--') == + [ [ 0 ], + [ 1 ], + [ 2 ] ] ) + +def test_list_k32_plots(fs: pyfakefs.fake_filesystem.FakeFilesystem) -> None: + fs.create_file('/t/plot-k32-0.plot', st_size=108 * GB) + 
fs.create_file('/t/plot-k32-1.plot', st_size=108 * GB) + fs.create_file('/t/.plot-k32-2.plot', st_size=108 * GB) + fs.create_file('/t/plot-k32-3.plot.2.tmp', st_size=108 * GB) + fs.create_file('/t/plot-k32-4.plot', st_size=100 * GB) + fs.create_file('/t/plot-k32-5.plot', st_size=108 * GB) + + assert (plot_util.list_k32_plots('/t/') == + [ '/t/plot-k32-0.plot', + '/t/plot-k32-1.plot', + '/t/plot-k32-5.plot' ] ) + + +def test_get_plotsize() -> None: + assert ( + [659272492, 107287518791, 221143636517, 455373353413, 936816632588] + == [plot_util.get_plotsize(n) for n in [25, 32, 33, 34, 35]] + ) diff --git a/src/plotman/_tests/reporting_test.py b/src/plotman/_tests/reporting_test.py new file mode 100644 index 00000000..5ce0ffa9 --- /dev/null +++ b/src/plotman/_tests/reporting_test.py @@ -0,0 +1,81 @@ +# TODO: migrate away from unittest patch +import os +import typing +from unittest.mock import patch, Mock + +from plotman import reporting +from plotman import job + + +def test_phases_str_basic() -> None: + phases = job.Phase.list_from_tuples([(1,2), (2,3), (3,4), (4,0)]) + assert reporting.phases_str(phases) == '1:2 2:3 3:4 4:0' + +def test_phases_str_ellipsis_1() -> None: + phases = job.Phase.list_from_tuples([(1,2), (2,3), (3,4), (4,0)]) + assert reporting.phases_str(phases, 3) == '1:2 [+1] 3:4 4:0' + +def test_phases_str_ellipsis_2() -> None: + phases = job.Phase.list_from_tuples([(1,2), (2,3), (3,4), (4,0)]) + assert reporting.phases_str(phases, 2) == '1:2 [+2] 4:0' + +def test_phases_str_none() -> None: + phases = job.Phase.list_from_tuples([(None, None), (3, 0)]) + assert reporting.phases_str(phases) == '?:? 3:0' + +def test_job_viz_empty() -> None: + assert(reporting.job_viz([]) == '1 2 3 4 ') + +@patch('plotman.job.Job') +def job_w_phase(ph: typing.Tuple[typing.Optional[int], typing.Optional[int]], MockJob: Mock) -> Mock: + j = MockJob() + j.progress.return_value = job.Phase.from_tuple(ph) + return j # type: ignore[no-any-return] + +def test_job_viz_positions() -> None: + jobs = [job_w_phase((1, 1)), + job_w_phase((2, 0)), + job_w_phase((2, 4)), + job_w_phase((2, 7)), + job_w_phase((4, 0))] + + assert(reporting.job_viz(jobs) == '1 . 2. . .3 4.') # type: ignore[arg-type] + +def test_job_viz_counts() -> None: + jobs = [job_w_phase((2, 2)), + job_w_phase((2, 3)), + job_w_phase((2, 3)), + job_w_phase((2, 4)), + job_w_phase((2, 4)), + job_w_phase((2, 4)), + job_w_phase((2, 5)), + job_w_phase((2, 5)), + job_w_phase((2, 5)), + job_w_phase((2, 5)), + job_w_phase((3, 1)), + job_w_phase((3, 1)), + job_w_phase((3, 1)), + job_w_phase((3, 1)), + job_w_phase((3, 1)), + job_w_phase((3, 1)), + ] + + assert(reporting.job_viz(jobs) == '1 2 .:;! 3 ! 
4 ') # type: ignore[arg-type] + +def test_to_prometheus_format() -> None: + prom_stati = [ + ('foo="bar",baz="2"', {'metric1': 1, 'metric2': 2}), + ('foo="blubb",baz="3"', {'metric1': 2, 'metric2': 3}) + ] + metrics = {'metric1': 'This is foo', 'metric2': 'In a parallel universe this is foo'} + expected = [ + '# HELP metric1 This is foo.', + '# TYPE metric1 gauge', + 'metric1{foo="bar",baz="2"} 1', + 'metric1{foo="blubb",baz="3"} 2', + '# HELP metric2 In a parallel universe this is foo.', + '# TYPE metric2 gauge', + 'metric2{foo="bar",baz="2"} 2', + 'metric2{foo="blubb",baz="3"} 3' + ] + result = reporting.to_prometheus_format(metrics, prom_stati) + assert(result == expected) diff --git a/src/plotman/_tests/resources/2021-04-04T19_00_47.681088-0400.log b/src/plotman/_tests/resources/2021-04-04T19_00_47.681088-0400.log new file mode 100644 index 00000000..ce11a343 --- /dev/null +++ b/src/plotman/_tests/resources/2021-04-04T19_00_47.681088-0400.log @@ -0,0 +1,2089 @@ +19:00:50.561 src.plotting.create_plots : INFO  Creating 1 plots of size 32, pool public key: 0b6f2b9428744d5062a2073e14b3ca9896a71f7ca9850bdcb285f26108fb19f610c788d47e4830c4c7abfa7611e00168 farmer public key: 93222af1a0f7b2ff39f98eb87c1b609fea797798302a60d1f1d6e5152cfdce12c260325d78446e7b8758101b64f43bd5 +19:00:50.576 src.plotting.create_plots : INFO  Memo: 0b6f2b9428744d5062a2073e14b3ca9896a71f7ca9850bdcb285f26108fb19f610c788d47e4830c4c7abfa7611e0016893222af1a0f7b2ff39f98eb87c1b609fea797798302a60d1f1d6e5152cfdce12c260325d78446e7b8758101b64f43bd5053ad2848e58469c8529c36fdfaf0fc00552f00778704010a4a88ce33d705892 +19:00:50.576 src.plotting.create_plots : INFO  Starting plot 1/1 + +Starting plotting progress into temporary dirs: /farm/yards/901 and /farm/yards/901 +ID: 3eb8a37981de1cc76187a36ed947ab4307943cf92967a7e166841186c7899e24 +Plot size is: 32 +Buffer size is: 4000MiB +Using 128 buckets +Using 4 threads of stripe size 65536 + +Starting phase 1/4: Forward Propagation into tmp files... Sun Apr 4 19:00:50 2021 +Computing table 1 +F1 complete, time: 193.141 seconds. CPU (173.42%) Sun Apr 4 19:04:03 2021 +Computing table 2 + Bucket 0 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 1 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 2 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 3 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 4 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 5 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 6 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 7 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 8 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 9 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 10 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 11 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 12 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 13 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 14 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 15 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 16 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 17 uniform sort. 
Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 18 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 19 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 20 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 21 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 22 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 23 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 24 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 25 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 26 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 27 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 28 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 29 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 30 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 31 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 32 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 33 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 34 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 35 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 36 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 37 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 38 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 39 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 40 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 41 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 42 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 43 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 44 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 45 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 46 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 47 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 48 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 49 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 50 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 51 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 52 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 53 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 54 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 55 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 56 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 57 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 58 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 59 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 60 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. 
+ Bucket 61 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 62 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 63 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 64 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 65 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 66 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 67 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 68 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 69 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 70 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 71 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 72 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 73 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 74 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 75 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 76 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 77 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 78 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 79 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 80 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 81 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 82 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 83 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 84 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 85 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 86 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 87 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 88 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 89 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 90 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 91 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 92 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 93 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 94 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 95 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 96 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 97 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 98 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 99 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 100 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 101 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 102 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 103 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 104 uniform sort. 
Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 105 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 106 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 107 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 108 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 109 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 110 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 111 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 112 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 113 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 114 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 115 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 116 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 117 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 118 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 119 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 120 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 121 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 122 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 123 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 124 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 125 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.281GiB. + Bucket 126 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Bucket 127 uniform sort. Ram: 3.840GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + Total matches: 4294907691 +Forward propagation table time: 2305.238 seconds. CPU (191.490%) Sun Apr 4 19:42:29 2021 +Computing table 3 + Bucket 0 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 1 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 2 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 3 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 4 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 5 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 6 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 7 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 8 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 9 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 10 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 11 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 12 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 13 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 14 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 15 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 16 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 17 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 18 uniform sort. 
Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 19 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 20 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 21 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 22 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 23 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 24 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 25 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 26 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 27 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 28 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 29 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 30 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 31 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 32 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 33 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 34 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 35 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 36 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 37 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 38 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 39 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 40 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 41 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 42 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 43 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 44 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 45 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 46 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 47 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 48 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 49 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 50 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 51 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 52 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 53 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 54 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 55 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 56 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 57 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 58 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 59 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 60 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 61 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. 
+ Bucket 62 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 63 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 64 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 65 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 66 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 67 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 68 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 69 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 70 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 71 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 72 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 73 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 74 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 75 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 76 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 77 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 78 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 79 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 80 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 81 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 82 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 83 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 84 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 85 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 86 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 87 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 88 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 89 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 90 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 91 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 92 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 93 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 94 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 95 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 96 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 97 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 98 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 99 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 100 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 101 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 102 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 103 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 104 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 105 uniform sort. 
Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 106 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 107 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 108 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 109 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 110 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 111 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 112 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 113 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 114 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 115 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 116 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 117 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 118 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 119 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 120 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 121 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 122 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 123 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 124 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 125 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 126 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 127 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Total matches: 4294825940 +Forward propagation table time: 2719.802 seconds. CPU (178.050%) Sun Apr 4 20:27:49 2021 +Computing table 4 + Bucket 0 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 1 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 2 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 3 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 4 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 5 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 6 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 7 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 8 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 9 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 10 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 11 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 12 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 13 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 14 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 15 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 16 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 17 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 18 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 19 uniform sort. 
Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 20 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 21 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 22 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 23 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 24 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 25 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 26 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 27 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 28 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 29 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 30 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 31 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 32 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 33 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 34 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 35 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 36 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 37 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 38 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 39 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 40 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 41 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 42 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 43 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 44 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 45 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 46 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 47 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 48 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 49 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 50 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 51 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 52 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 53 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 54 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 55 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 56 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 57 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 58 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 59 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 60 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 61 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 62 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. 
+ Bucket 63 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 64 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 65 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 66 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 67 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 68 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 69 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 70 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 71 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 72 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 73 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 74 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 75 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 76 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 77 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 78 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 79 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 80 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 81 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 82 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 83 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 84 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 85 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 86 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 87 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 88 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 89 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 90 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 91 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 92 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 93 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 94 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 95 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 96 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 97 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 98 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 99 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 100 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 101 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 102 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 103 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 104 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 105 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 106 uniform sort. 
Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 107 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 108 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 109 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 110 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 111 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 112 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 113 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 114 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 115 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 116 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 117 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 118 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 119 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 120 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 121 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 122 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 123 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 124 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 125 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 126 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 127 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Total matches: 4294693125 +Forward propagation table time: 3456.261 seconds. CPU (172.590%) Sun Apr 4 21:25:25 2021 +Computing table 5 + Bucket 0 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 1 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 2 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 3 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 4 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 5 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 6 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 7 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 8 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 9 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 10 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 11 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 12 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 13 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 14 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 15 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 16 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 17 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 18 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 19 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 20 uniform sort. 
Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 21 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 22 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 23 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 24 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 25 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 26 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 27 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 28 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 29 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 30 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 31 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 32 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 33 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 34 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 35 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 36 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 37 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 38 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 39 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 40 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 41 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 42 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 43 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 44 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 45 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 46 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 47 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 48 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 49 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 50 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 51 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 52 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 53 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 54 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 55 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 56 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 57 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 58 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 59 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 60 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 61 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 62 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 63 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. 
+ Bucket 64 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 65 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 66 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 67 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 68 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 69 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 70 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 71 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 72 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 73 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 74 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 75 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 76 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 77 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 78 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 79 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 80 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 81 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 82 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 83 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 84 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 85 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 86 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 87 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 88 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 89 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 90 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 91 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 92 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 93 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 94 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 95 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 96 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 97 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 98 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 99 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 100 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 101 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 102 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 103 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 104 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 105 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 106 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 107 uniform sort. 
Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 108 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 109 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 110 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 111 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 112 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 113 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 114 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 115 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 116 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 117 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 118 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 119 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 120 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 121 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 122 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 123 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.813GiB. + Bucket 124 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 125 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 126 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 127 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Total matches: 4294469524 +Forward propagation table time: 3343.521 seconds. CPU (173.250%) Sun Apr 4 22:21:08 2021 +Computing table 6 + Bucket 0 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 1 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 2 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 3 uniform sort. Ram: 3.840GiB, u_sort min: 2.750GiB, qs min: 0.688GiB. + Bucket 4 uniform sort. Ram: 3.840GiB, u_sort min: 2.750GiB, qs min: 0.688GiB. + Bucket 5 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 6 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 7 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 8 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 9 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 10 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 11 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 12 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 13 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 14 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 15 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 16 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 17 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 18 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 19 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 20 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 21 uniform sort. 
Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 22 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 23 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 24 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 25 uniform sort. Ram: 3.840GiB, u_sort min: 2.750GiB, qs min: 0.688GiB. + Bucket 26 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 27 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 28 uniform sort. Ram: 3.840GiB, u_sort min: 2.750GiB, qs min: 0.688GiB. + Bucket 29 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 30 uniform sort. Ram: 3.840GiB, u_sort min: 2.750GiB, qs min: 0.688GiB. + Bucket 31 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 32 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 33 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 34 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 35 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 36 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 37 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 38 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 39 uniform sort. Ram: 3.840GiB, u_sort min: 2.750GiB, qs min: 0.688GiB. + Bucket 40 uniform sort. Ram: 3.840GiB, u_sort min: 2.750GiB, qs min: 0.688GiB. + Bucket 41 uniform sort. Ram: 3.840GiB, u_sort min: 2.750GiB, qs min: 0.688GiB. + Bucket 42 uniform sort. Ram: 3.840GiB, u_sort min: 2.750GiB, qs min: 0.688GiB. + Bucket 43 uniform sort. Ram: 3.840GiB, u_sort min: 2.750GiB, qs min: 0.688GiB. + Bucket 44 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 45 uniform sort. Ram: 3.840GiB, u_sort min: 2.750GiB, qs min: 0.688GiB. + Bucket 46 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 47 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 48 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 49 uniform sort. Ram: 3.840GiB, u_sort min: 2.750GiB, qs min: 0.688GiB. + Bucket 50 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 51 uniform sort. Ram: 3.840GiB, u_sort min: 2.750GiB, qs min: 0.688GiB. + Bucket 52 uniform sort. Ram: 3.840GiB, u_sort min: 2.750GiB, qs min: 0.688GiB. + Bucket 53 uniform sort. Ram: 3.840GiB, u_sort min: 2.750GiB, qs min: 0.688GiB. + Bucket 54 uniform sort. Ram: 3.840GiB, u_sort min: 2.750GiB, qs min: 0.688GiB. + Bucket 55 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 56 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 57 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 58 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 59 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 60 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 61 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 62 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 63 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 64 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. 
+ Bucket 65 uniform sort. Ram: 3.840GiB, u_sort min: 2.750GiB, qs min: 0.688GiB. + Bucket 66 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 67 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 68 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 69 uniform sort. Ram: 3.840GiB, u_sort min: 2.750GiB, qs min: 0.688GiB. + Bucket 70 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 71 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 72 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 73 uniform sort. Ram: 3.840GiB, u_sort min: 2.750GiB, qs min: 0.688GiB. + Bucket 74 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 75 uniform sort. Ram: 3.840GiB, u_sort min: 2.750GiB, qs min: 0.688GiB. + Bucket 76 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 77 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 78 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 79 uniform sort. Ram: 3.840GiB, u_sort min: 2.750GiB, qs min: 0.688GiB. + Bucket 80 uniform sort. Ram: 3.840GiB, u_sort min: 2.750GiB, qs min: 0.688GiB. + Bucket 81 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 82 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 83 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 84 uniform sort. Ram: 3.840GiB, u_sort min: 2.750GiB, qs min: 0.688GiB. + Bucket 85 uniform sort. Ram: 3.840GiB, u_sort min: 2.750GiB, qs min: 0.688GiB. + Bucket 86 uniform sort. Ram: 3.840GiB, u_sort min: 2.750GiB, qs min: 0.688GiB. + Bucket 87 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 88 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 89 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 90 uniform sort. Ram: 3.840GiB, u_sort min: 2.750GiB, qs min: 0.688GiB. + Bucket 91 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 92 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 93 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 94 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 95 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 96 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 97 uniform sort. Ram: 3.840GiB, u_sort min: 2.750GiB, qs min: 0.688GiB. + Bucket 98 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 99 uniform sort. Ram: 3.840GiB, u_sort min: 2.750GiB, qs min: 0.688GiB. + Bucket 100 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 101 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 102 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 103 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 104 uniform sort. Ram: 3.840GiB, u_sort min: 2.750GiB, qs min: 0.688GiB. + Bucket 105 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 106 uniform sort. Ram: 3.840GiB, u_sort min: 2.750GiB, qs min: 0.688GiB. + Bucket 107 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 108 uniform sort. 
Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 109 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 110 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 111 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 112 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 113 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 114 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 115 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 116 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 117 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 118 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 119 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 120 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 121 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 122 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 123 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 124 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 125 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 126 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Bucket 127 uniform sort. Ram: 3.840GiB, u_sort min: 1.375GiB, qs min: 0.687GiB. + Total matches: 4293986638 +Forward propagation table time: 3127.428 seconds. CPU (178.720%) Sun Apr 4 23:13:16 2021 +Computing table 7 + Bucket 0 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 1 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 2 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 3 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 4 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 5 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 6 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 7 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 8 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 9 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 10 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 11 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB. + Bucket 12 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 13 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 14 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 15 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 16 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 17 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 18 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 19 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 20 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 21 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB. + Bucket 22 uniform sort. 
+	Bucket 23 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 24 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 25 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 26 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 27 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 28 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 29 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 30 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 31 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 32 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 33 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 34 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 35 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB.
+	Bucket 36 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 37 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 38 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 39 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 40 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 41 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 42 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 43 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 44 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 45 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 46 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 47 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 48 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 49 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 50 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 51 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 52 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 53 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 54 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 55 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 56 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 57 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 58 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 59 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB.
+	Bucket 60 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 61 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 62 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB.
+	Bucket 63 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 64 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 65 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 66 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 67 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB.
+	Bucket 68 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 69 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 70 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 71 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 72 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 73 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 74 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 75 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB.
+	Bucket 76 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 77 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 78 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 79 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 80 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 81 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 82 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB.
+	Bucket 83 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 84 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 85 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB.
+	Bucket 86 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 87 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 88 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 89 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 90 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 91 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 92 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 93 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 94 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 95 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 96 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 97 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 98 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 99 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 100 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 101 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 102 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 103 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 104 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 105 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 106 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 107 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 108 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 109 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 110 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 111 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 112 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 113 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 114 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 115 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 116 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 117 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 118 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 119 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 120 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 121 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 122 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 123 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 124 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB.
+	Bucket 125 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 126 uniform sort. Ram: 3.840GiB, u_sort min: 1.125GiB, qs min: 0.562GiB.
+	Bucket 127 uniform sort. Ram: 3.840GiB, u_sort min: 2.250GiB, qs min: 0.563GiB.
+	Total matches: 4292974565
+Forward propagation table time: 2426.274 seconds. CPU (183.160%) Sun Apr  4 23:53:42 2021
+Time for phase 1 = 17571.981 seconds. CPU (178.600%) Sun Apr  4 23:53:42 2021
+
+Starting phase 2/4: Backpropagation into tmp files... Sun Apr  4 23:53:42 2021
+Backpropagating on table 7
+scanned table 7
+scanned time =  337.014 seconds. CPU (30.500%) Sun Apr  4 23:59:19 2021
+sorting table 7
+Backpropagating on table 6
+scanned table 6
+scanned time =  339.546 seconds. CPU (69.420%) Mon Apr  5 00:14:11 2021
+sorting table 6
+sort time =  909.801 seconds. CPU (78.330%) Mon Apr  5 00:29:21 2021
+Backpropagating on table 5
+scanned table 5
+scanned time =  347.923 seconds. CPU (70.320%) Mon Apr  5 00:35:11 2021
+sorting table 5
+sort time =  886.987 seconds. CPU (77.400%) Mon Apr  5 00:49:58 2021
+Backpropagating on table 4
+scanned table 4
+scanned time =  326.522 seconds. CPU (68.940%) Mon Apr  5 00:55:26 2021
+sorting table 4
+sort time =  845.687 seconds. CPU (80.330%) Mon Apr  5 01:09:31 2021
+Backpropagating on table 3
+scanned table 3
+scanned time =  341.132 seconds. CPU (69.560%) Mon Apr  5 01:15:14 2021
+sorting table 3
+sort time =  837.117 seconds. CPU (78.960%) Mon Apr  5 01:29:11 2021
+Backpropagating on table 2
+scanned table 2
+scanned time =  331.484 seconds. CPU (70.220%) Mon Apr  5 01:34:44 2021
+sorting table 2
+sort time =  848.353 seconds. CPU (79.340%) Mon Apr  5 01:48:52 2021
+table 1 new size: 3425107198
+Time for phase 2 = 6911.621 seconds. CPU (71.780%) Mon Apr  5 01:48:54 2021
+Wrote: 268
+
+Starting phase 3/4: Compression from tmp files into "/farm/yards/901/plot-k32-2021-04-04-19-00-3eb8a37981de1cc76187a36ed947ab4307943cf92967a7e166841186c7899e24.plot.2.tmp" ... Mon Apr  5 01:48:54 2021
+Compressing tables 1 and 2
+	Bucket 0 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 1 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 2 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 3 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 4 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 5 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 6 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 7 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 8 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 9 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 10 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 11 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 12 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 13 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 14 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 15 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 16 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 17 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 18 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 19 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 20 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 21 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 22 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 23 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 24 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 25 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 26 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 27 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 28 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 29 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 30 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 31 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 32 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 33 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 34 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 35 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 36 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 37 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 38 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 39 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 40 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 41 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 42 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 43 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 44 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 45 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 46 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 47 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 48 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 49 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 50 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 51 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 52 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 53 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 54 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 55 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 56 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 57 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 58 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 59 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 60 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 61 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 62 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 63 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 64 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 65 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 66 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 67 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 68 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 69 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 70 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 71 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 72 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 73 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 74 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 75 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 76 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 77 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 78 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 79 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 80 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 81 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 82 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 83 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 84 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 85 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 86 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 87 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 88 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 89 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 90 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 91 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 92 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 93 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 94 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 95 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 96 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 97 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 98 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 99 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 100 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 101 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 102 QS. Ram: 3.840GiB, u_sort min: 0.102GiB, qs min: 0.031GiB. force_qs: 1
+	First computation pass time: 1185.951 seconds. CPU (83.650%) Mon Apr  5 02:08:40 2021
+	Bucket 0 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.682GiB.
+	Bucket 1 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.643GiB.
+	Bucket 2 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.688GiB.
+	Bucket 3 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.662GiB.
+	Bucket 4 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.628GiB.
+	Bucket 5 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.640GiB.
+	Bucket 6 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.654GiB.
+	Bucket 7 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.651GiB.
+	Bucket 8 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.659GiB.
+	Bucket 9 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.653GiB.
+	Bucket 10 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.648GiB.
+	Bucket 11 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.664GiB.
+	Bucket 12 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.674GiB.
+	Bucket 13 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.661GiB.
+	Bucket 14 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.650GiB.
+	Bucket 15 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.644GiB.
+	Bucket 16 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.653GiB.
+	Bucket 17 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.649GiB.
+	Bucket 18 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.640GiB.
+	Bucket 19 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.629GiB.
+	Bucket 20 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.630GiB.
+	Bucket 21 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.641GiB.
+	Bucket 22 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.649GiB.
+	Bucket 23 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.649GiB.
+	Bucket 24 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.645GiB.
+	Bucket 25 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.641GiB.
+	Bucket 26 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.646GiB.
+	Bucket 27 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.647GiB.
+	Bucket 28 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.647GiB.
+	Bucket 29 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.644GiB.
+	Bucket 30 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.649GiB.
+	Bucket 31 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.651GiB.
+	Bucket 32 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.653GiB.
+	Bucket 33 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.655GiB.
+	Bucket 34 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.656GiB.
+	Bucket 35 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.657GiB.
+	Bucket 36 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.648GiB.
+	Bucket 37 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.645GiB.
+	Bucket 38 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.642GiB.
+	Bucket 39 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.638GiB.
+	Bucket 40 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.638GiB.
+	Bucket 41 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.639GiB.
+	Bucket 42 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.641GiB.
+	Bucket 43 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.642GiB.
+	Bucket 44 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.646GiB.
+	Bucket 45 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.649GiB.
+	Bucket 46 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.652GiB.
+	Bucket 47 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.654GiB.
+	Bucket 48 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.654GiB.
+	Bucket 49 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.651GiB.
+	Bucket 50 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.650GiB.
+	Bucket 51 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.647GiB.
+	Bucket 52 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.646GiB.
+	Bucket 53 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.646GiB.
+	Bucket 54 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.646GiB.
+	Bucket 55 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.647GiB.
+	Bucket 56 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.644GiB.
+	Bucket 57 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.642GiB.
+	Bucket 58 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.643GiB.
+	Bucket 59 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.643GiB.
+	Bucket 60 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.647GiB.
+	Bucket 61 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.647GiB.
+	Bucket 62 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.649GiB.
+	Bucket 63 QS. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.649GiB. force_qs: 1
+	Second computation pass time: 819.668 seconds. CPU (81.220%) Mon Apr  5 02:22:20 2021
+	Wrote 3429267383 entries
+Total compress table time: 2006.181 seconds. CPU (82.660%) Mon Apr  5 02:22:20 2021
+Compressing tables 2 and 3
+	Bucket 0 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 0 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 1 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 2 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 1 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 3 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 4 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.408GiB.
+	Bucket 2 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 5 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 6 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 3 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 7 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.408GiB.
+	Bucket 8 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 4 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 9 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 10 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 5 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 11 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 12 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 6 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 13 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.408GiB.
+	Bucket 14 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.408GiB.
+	Bucket 7 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 15 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 16 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 8 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 17 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.408GiB.
+	Bucket 18 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.408GiB.
+	Bucket 9 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 19 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 20 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.408GiB.
+	Bucket 10 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 21 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.408GiB.
+	Bucket 22 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 11 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 23 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.408GiB.
+	Bucket 24 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.408GiB.
+	Bucket 12 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 25 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.408GiB.
+	Bucket 26 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 13 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 27 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.408GiB.
+	Bucket 28 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.408GiB.
+	Bucket 14 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 29 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 30 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.408GiB.
+	Bucket 15 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 31 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 32 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 16 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 33 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.408GiB.
+	Bucket 34 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.408GiB.
+	Bucket 17 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 35 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 36 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.408GiB.
+	Bucket 18 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 37 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 38 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 19 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 39 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.408GiB.
+	Bucket 40 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 20 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 41 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.408GiB.
+	Bucket 42 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.408GiB.
+	Bucket 21 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 43 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.408GiB.
+	Bucket 44 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 22 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 45 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 46 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 23 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 47 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.408GiB.
+	Bucket 48 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.408GiB.
+	Bucket 24 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 49 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 50 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 25 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 51 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 52 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 26 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 53 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.408GiB.
+	Bucket 54 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.408GiB.
+	Bucket 27 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 55 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 56 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.408GiB.
+	Bucket 28 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 57 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.408GiB.
+	Bucket 58 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 29 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 59 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 60 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.408GiB.
+	Bucket 30 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 61 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.408GiB.
+	Bucket 62 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 31 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 63 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 64 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.408GiB.
+	Bucket 32 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 65 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.408GiB.
+	Bucket 66 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.408GiB.
+	Bucket 33 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 67 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 68 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 34 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 69 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 70 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.408GiB.
+	Bucket 35 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 71 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 72 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 36 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 73 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 74 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 37 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 75 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 76 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.408GiB.
+	Bucket 38 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 77 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 78 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 39 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 79 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 80 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.408GiB.
+	Bucket 40 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 81 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 82 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.408GiB.
+	Bucket 41 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 83 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 84 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 42 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 85 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 86 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 43 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 87 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 88 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 44 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 89 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.408GiB.
+	Bucket 90 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.408GiB.
+	Bucket 45 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 91 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 92 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.408GiB.
+	Bucket 46 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 93 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.408GiB.
+	Bucket 94 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.408GiB.
+	Bucket 47 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 95 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.408GiB.
+	Bucket 96 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 48 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 97 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 98 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.408GiB.
+	Bucket 49 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 99 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 100 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.407GiB.
+	Bucket 50 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 101 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.408GiB.
+	Bucket 102 QS. Ram: 1.920GiB, u_sort min: 0.203GiB, qs min: 0.082GiB. force_qs: 1
+	Bucket 51 QS. Ram: 1.920GiB, u_sort min: 0.203GiB, qs min: 0.081GiB. force_qs: 1
+	First computation pass time: 1641.359 seconds. CPU (85.940%) Mon Apr  5 02:49:41 2021
+	Bucket 0 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.021GiB.
+	Bucket 1 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.021GiB.
+	Bucket 2 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.021GiB.
+	Bucket 3 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.021GiB.
+	Bucket 4 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.021GiB.
+	Bucket 5 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.021GiB.
+	Bucket 6 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.021GiB.
+	Bucket 7 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.021GiB.
+	Bucket 8 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.021GiB.
+	Bucket 9 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.021GiB.
+	Bucket 10 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.021GiB.
+	Bucket 11 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.021GiB.
+	Bucket 12 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.021GiB.
+	Bucket 13 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.021GiB.
+	Bucket 14 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.021GiB.
+	Bucket 15 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.021GiB.
+	Bucket 16 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.021GiB.
+	Bucket 17 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.021GiB.
+	Bucket 18 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.021GiB.
+	Bucket 19 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.021GiB.
+	Bucket 20 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.021GiB.
+	Bucket 21 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.021GiB.
+	Bucket 22 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.021GiB.
+	Bucket 23 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.021GiB.
+	Bucket 24 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.021GiB.
+	Bucket 25 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.021GiB.
+	Bucket 26 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.021GiB.
+	Bucket 27 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.021GiB.
+	Bucket 28 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.021GiB.
+	Bucket 29 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.021GiB.
+	Bucket 30 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.021GiB.
+	Bucket 31 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.021GiB.
+	Bucket 32 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.021GiB.
+	Bucket 33 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.021GiB.
+	Bucket 34 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.021GiB.
+	Bucket 35 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.021GiB.
+	Bucket 36 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.021GiB.
+	Bucket 37 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.021GiB.
+	Bucket 38 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.021GiB.
+	Bucket 39 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.021GiB.
+	Bucket 40 QS. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 0.817GiB. force_qs: 1
+	Second computation pass time: 869.565 seconds. CPU (79.930%) Mon Apr  5 03:04:11 2021
+	Wrote 3439708244 entries
+Total compress table time: 2510.965 seconds. CPU (83.860%) Mon Apr  5 03:04:11 2021
+Compressing tables 3 and 4
+	Bucket 0 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 0 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 1 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 2 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 1 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 3 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 4 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 2 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 5 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 6 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 3 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 7 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 8 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 4 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 9 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 10 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 5 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 11 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 12 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 6 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 13 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 14 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 7 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 15 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 16 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 8 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 17 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 18 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 9 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 19 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 20 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 10 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 21 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 22 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 11 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 23 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 24 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 12 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 25 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 26 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 13 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 27 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 28 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 14 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 29 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 30 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 15 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 31 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 32 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 16 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 33 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 34 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 17 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 35 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 36 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 18 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 37 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 38 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 19 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 39 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 40 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 20 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 41 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 42 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 21 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 43 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 44 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 22 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 45 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 46 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 23 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 47 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 48 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 24 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 49 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 50 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 25 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 51 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 52 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 26 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 53 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 54 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 27 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 55 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 56 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 28 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 57 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 58 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 29 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 59 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 60 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 30 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 61 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 62 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 31 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 63 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 64 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 32 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 65 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 66 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 33 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 67 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 68 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 34 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 69 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 70 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 35 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 71 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 72 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 36 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 73 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 74 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 37 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 75 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 76 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 38 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 77 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 78 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 39 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 79 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 80 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 40 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 81 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 82 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 41 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 83 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 84 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 42 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 85 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 86 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 43 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 87 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 88 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 44 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 89 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 90 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 45 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 91 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 92 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 46 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 93 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 94 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 47 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 95 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 96 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 48 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 97 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.410GiB.
+	Bucket 98 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 49 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 99 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 100 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 50 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 101 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.409GiB.
+	Bucket 102 QS. Ram: 1.920GiB, u_sort min: 0.813GiB, qs min: 0.209GiB. force_qs: 1
+	Bucket 51 QS. Ram: 1.920GiB, u_sort min: 0.813GiB, qs min: 0.208GiB. force_qs: 1
+	First computation pass time: 1631.845 seconds. CPU (86.160%) Mon Apr  5 03:31:23 2021
+	Bucket 0 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.022GiB.
+	Bucket 1 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.022GiB.
+	Bucket 2 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.022GiB.
+	Bucket 3 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.022GiB.
+	Bucket 4 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.022GiB.
+	Bucket 5 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.022GiB.
+	Bucket 6 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.022GiB.
+	Bucket 7 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.022GiB.
+	Bucket 8 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.022GiB.
+	Bucket 9 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.022GiB.
+	Bucket 10 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.022GiB.
+	Bucket 11 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.022GiB.
+	Bucket 12 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.022GiB.
+	Bucket 13 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.022GiB.
+	Bucket 14 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.022GiB.
+	Bucket 15 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.022GiB.
+	Bucket 16 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.022GiB.
+	Bucket 17 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.022GiB.
+	Bucket 18 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.022GiB.
+	Bucket 19 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.022GiB.
+	Bucket 20 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.022GiB.
+	Bucket 21 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.022GiB.
+	Bucket 22 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.022GiB.
+	Bucket 23 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.022GiB.
+	Bucket 24 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.022GiB.
+	Bucket 25 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.022GiB.
+	Bucket 26 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.022GiB.
+	Bucket 27 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.022GiB.
+	Bucket 28 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.022GiB.
+	Bucket 29 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.022GiB.
+	Bucket 30 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.022GiB.
+	Bucket 31 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.022GiB.
+	Bucket 32 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.022GiB.
+	Bucket 33 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.022GiB.
+	Bucket 34 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.022GiB.
+	Bucket 35 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.022GiB.
+	Bucket 36 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.022GiB.
+	Bucket 37 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.022GiB.
+	Bucket 38 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.022GiB.
+	Bucket 39 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.022GiB.
+	Bucket 40 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.022GiB.
+	Bucket 41 QS. Ram: 3.840GiB, u_sort min: 0.102GiB, qs min: 0.050GiB. force_qs: 1
+	Second computation pass time: 870.303 seconds. CPU (79.480%) Mon Apr  5 03:45:53 2021
+	Wrote 3465774150 entries
+Total compress table time: 2502.233 seconds. CPU (83.840%) Mon Apr  5 03:45:53 2021
+Compressing tables 4 and 5
+	Bucket 0 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 0 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 1 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 2 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 1 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 3 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 4 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 2 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 5 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 6 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 3 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 7 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 8 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 4 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 9 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 10 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 5 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 11 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 12 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 6 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 13 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 14 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 7 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 15 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 16 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 8 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 17 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 18 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 9 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 19 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 20 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 10 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 21 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 22 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 11 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 23 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 24 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 12 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 25 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 26 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 13 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 27 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 28 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 14 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 29 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 30 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 15 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 31 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 32 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 16 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 33 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 34 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 17 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 35 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 36 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 18 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 37 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 38 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 19 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 39 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 40 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 20 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 41 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 42 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 21 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 43 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 44 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 22 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 45 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 46 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 23 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 47 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 48 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 24 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 49 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 50 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 25 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 51 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 52 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 26 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 53 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 54 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 27 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB.
+	Bucket 55 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+	Bucket 56 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB.
+ Bucket 28 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 57 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB. + Bucket 58 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB. + Bucket 29 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 59 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB. + Bucket 60 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB. + Bucket 30 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 61 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB. + Bucket 62 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB. + Bucket 31 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 63 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB. + Bucket 64 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB. + Bucket 32 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 65 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB. + Bucket 66 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB. + Bucket 33 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 67 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB. + Bucket 68 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB. + Bucket 34 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 69 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB. + Bucket 70 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB. + Bucket 35 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 71 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB. + Bucket 72 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB. + Bucket 36 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 73 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB. + Bucket 74 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB. + Bucket 37 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 75 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB. + Bucket 76 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB. + Bucket 38 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 77 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB. + Bucket 78 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB. + Bucket 39 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 79 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB. + Bucket 80 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB. + Bucket 40 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 81 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB. + Bucket 82 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB. + Bucket 41 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 83 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB. + Bucket 84 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB. + Bucket 42 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 85 uniform sort. 
Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB. + Bucket 86 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB. + Bucket 43 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 87 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB. + Bucket 88 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB. + Bucket 44 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 89 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB. + Bucket 90 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB. + Bucket 45 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 91 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB. + Bucket 92 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB. + Bucket 46 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 93 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB. + Bucket 94 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB. + Bucket 47 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 95 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB. + Bucket 96 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB. + Bucket 48 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 97 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB. + Bucket 98 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB. + Bucket 49 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 99 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB. + Bucket 100 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB. + Bucket 50 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 101 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB. + Bucket 102 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.414GiB. + Bucket 51 QS. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.523GiB. force_qs: 1 + Bucket 103 QS. Ram: 1.920GiB, u_sort min: 0.406GiB, qs min: 0.119GiB. force_qs: 1 + First computation pass time: 1667.602 seconds. CPU (86.330%) Mon Apr 5 04:13:41 2021 + Bucket 0 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.026GiB. + Bucket 1 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.026GiB. + Bucket 2 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.026GiB. + Bucket 3 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.026GiB. + Bucket 4 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.026GiB. + Bucket 5 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.026GiB. + Bucket 6 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.026GiB. + Bucket 7 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.026GiB. + Bucket 8 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.026GiB. + Bucket 9 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.026GiB. + Bucket 10 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.026GiB. + Bucket 11 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.026GiB. + Bucket 12 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.026GiB. + Bucket 13 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.026GiB. + Bucket 14 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.026GiB. 
+ Bucket 15 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.026GiB. + Bucket 16 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.026GiB. + Bucket 17 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.026GiB. + Bucket 18 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.026GiB. + Bucket 19 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.026GiB. + Bucket 20 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.026GiB. + Bucket 21 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.026GiB. + Bucket 22 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.026GiB. + Bucket 23 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.026GiB. + Bucket 24 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.026GiB. + Bucket 25 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.026GiB. + Bucket 26 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.026GiB. + Bucket 27 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.026GiB. + Bucket 28 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.026GiB. + Bucket 29 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.026GiB. + Bucket 30 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.026GiB. + Bucket 31 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.026GiB. + Bucket 32 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.026GiB. + Bucket 33 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.026GiB. + Bucket 34 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.026GiB. + Bucket 35 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.026GiB. + Bucket 36 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.026GiB. + Bucket 37 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.026GiB. + Bucket 38 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.026GiB. + Bucket 39 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.026GiB. + Bucket 40 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.026GiB. + Bucket 41 QS. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.691GiB. force_qs: 1 + Second computation pass time: 888.151 seconds. CPU (81.280%) Mon Apr 5 04:28:29 2021 + Wrote 3532387464 entries +Total compress table time: 2555.841 seconds. CPU (84.580%) Mon Apr 5 04:28:29 2021 +Compressing tables 5 and 6 + Bucket 0 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 0 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 1 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 2 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 1 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 3 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 4 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 2 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 5 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 6 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 3 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 7 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 8 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 4 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. 
+ Bucket 9 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 10 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 5 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 11 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 12 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 6 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 13 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 14 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 7 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 15 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 16 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 8 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 17 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 18 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 9 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 19 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 20 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 10 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 21 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 22 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 11 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 23 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 24 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 12 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 25 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 26 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 13 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 27 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 28 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 14 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 29 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 30 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 15 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 31 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 32 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 16 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 33 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 34 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 17 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 35 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 36 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 18 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 37 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 38 uniform sort. 
Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 19 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 39 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 40 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 20 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 41 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 42 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 21 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 43 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 44 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 22 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 45 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 46 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 23 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 47 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 48 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 24 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 49 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 50 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 25 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 51 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 52 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 26 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 53 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 54 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 27 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 55 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 56 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 28 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 57 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 58 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 29 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 59 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 60 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 30 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 61 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 62 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 31 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 63 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 64 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 32 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 65 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 66 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 33 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. 
+ Bucket 67 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 68 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 34 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 69 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 70 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 35 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 71 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 72 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 36 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 73 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 74 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 37 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 75 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 76 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 38 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 77 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 78 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 39 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 79 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 80 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 40 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 81 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 82 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 41 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 83 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 84 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 42 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 85 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 86 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 43 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 87 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 88 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 44 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 89 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 90 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 45 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 91 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 92 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 46 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 93 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 94 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 47 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 95 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 96 uniform sort. 
Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 48 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 97 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 98 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 49 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 99 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 100 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 50 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 101 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 102 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 51 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 103 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 104 uniform sort. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.427GiB. + Bucket 52 QS. Ram: 1.920GiB, u_sort min: 1.625GiB, qs min: 0.517GiB. force_qs: 1 + Bucket 105 QS. Ram: 1.920GiB, u_sort min: 0.406GiB, qs min: 0.117GiB. force_qs: 1 + First computation pass time: 1729.316 seconds. CPU (85.660%) Mon Apr 5 04:57:18 2021 + Bucket 0 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.038GiB. + Bucket 1 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.038GiB. + Bucket 2 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.038GiB. + Bucket 3 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.038GiB. + Bucket 4 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.038GiB. + Bucket 5 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.038GiB. + Bucket 6 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.038GiB. + Bucket 7 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.038GiB. + Bucket 8 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.038GiB. + Bucket 9 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.038GiB. + Bucket 10 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.038GiB. + Bucket 11 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.038GiB. + Bucket 12 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.038GiB. + Bucket 13 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.038GiB. + Bucket 14 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.038GiB. + Bucket 15 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.038GiB. + Bucket 16 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.038GiB. + Bucket 17 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.038GiB. + Bucket 18 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.038GiB. + Bucket 19 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.038GiB. + Bucket 20 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.038GiB. + Bucket 21 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.038GiB. + Bucket 22 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.038GiB. + Bucket 23 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.038GiB. + Bucket 24 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.038GiB. + Bucket 25 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.038GiB. + Bucket 26 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.038GiB. + Bucket 27 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.038GiB. 
+ Bucket 28 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.038GiB. + Bucket 29 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.038GiB. + Bucket 30 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.038GiB. + Bucket 31 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.038GiB. + Bucket 32 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.038GiB. + Bucket 33 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.038GiB. + Bucket 34 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.038GiB. + Bucket 35 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.038GiB. + Bucket 36 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.038GiB. + Bucket 37 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.038GiB. + Bucket 38 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.039GiB. + Bucket 39 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.038GiB. + Bucket 40 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.038GiB. + Bucket 41 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.038GiB. + Bucket 42 uniform sort. Ram: 3.840GiB, u_sort min: 3.250GiB, qs min: 1.038GiB. + Bucket 43 QS. Ram: 3.840GiB, u_sort min: 0.813GiB, qs min: 0.302GiB. force_qs: 1 + Second computation pass time: 949.061 seconds. CPU (78.800%) Mon Apr 5 05:13:07 2021 + Wrote 3712594744 entries +Total compress table time: 2678.466 seconds. CPU (83.230%) Mon Apr 5 05:13:07 2021 +Compressing tables 6 and 7 + Bucket 0 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 1 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 2 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 3 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 4 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 5 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 6 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 7 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 8 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 9 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 10 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 11 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 12 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 13 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 14 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 15 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 16 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 17 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 18 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 19 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 20 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 21 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 22 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 23 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 24 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. 
+ Bucket 25 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 26 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 27 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 28 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 29 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 30 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 31 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 32 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 33 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 34 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 35 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 36 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 37 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 38 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 39 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 40 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 41 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 42 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 43 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 44 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 45 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 46 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 47 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 48 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 49 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 50 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 51 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 52 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 53 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 54 uniform sort. Ram: 3.840GiB, u_sort min: 1.625GiB, qs min: 0.812GiB. + Bucket 55 QS. Ram: 3.840GiB, u_sort min: 0.813GiB, qs min: 0.262GiB. force_qs: 1 + First computation pass time: 1220.986 seconds. CPU (76.040%) Mon Apr 5 05:33:28 2021 + Bucket 0 uniform sort. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 1 uniform sort. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 2 uniform sort. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 3 uniform sort. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 4 uniform sort. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 5 uniform sort. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 6 uniform sort. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 7 uniform sort. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 8 uniform sort. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 9 uniform sort. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 10 uniform sort. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 11 uniform sort. 
Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 12 uniform sort. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 13 uniform sort. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 14 uniform sort. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 15 uniform sort. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 16 uniform sort. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 17 uniform sort. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 18 uniform sort. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 19 uniform sort. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 20 uniform sort. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 21 uniform sort. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 22 uniform sort. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 23 uniform sort. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 24 uniform sort. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 25 uniform sort. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 26 uniform sort. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 27 uniform sort. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 28 uniform sort. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 29 uniform sort. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 30 uniform sort. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 31 uniform sort. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 32 uniform sort. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 33 uniform sort. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 34 uniform sort. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 35 uniform sort. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 36 uniform sort. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 37 uniform sort. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 38 uniform sort. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 39 uniform sort. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 40 uniform sort. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 41 uniform sort. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 42 uniform sort. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 43 uniform sort. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 44 uniform sort. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 45 uniform sort. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 46 uniform sort. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 1.003GiB. + Bucket 47 QS. Ram: 3.840GiB, u_sort min: 3.000GiB, qs min: 0.823GiB. force_qs: 1 + Second computation pass time: 1062.327 seconds. CPU (79.610%) Mon Apr 5 05:51:11 2021 + Wrote 4292974565 entries +Total compress table time: 2283.456 seconds. CPU (77.700%) Mon Apr 5 05:51:11 2021 +Time for phase 3 = 14537.188 seconds. CPU (82.730%) Mon Apr 5 05:51:11 2021 + +Starting phase 4/4: Write Checkpoint tables into "/farm/yards/901/plot-k32-2021-04-04-19-00-3eb8a37981de1cc76187a36ed947ab4307943cf92967a7e166841186c7899e24.plot.2.tmp" ... Mon Apr 5 05:51:11 2021 + Starting to write C1 and C3 tables + Bucket 0 uniform sort. 
Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 1 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 2 uniform sort. Ram: 3.840GiB, u_sort min: 1.500GiB, qs min: 0.375GiB. + Bucket 3 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 4 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 5 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 6 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 7 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 8 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 9 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 10 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 11 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 12 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 13 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 14 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 15 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 16 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 17 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 18 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 19 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 20 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 21 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 22 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 23 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 24 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 25 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 26 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 27 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 28 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 29 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 30 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 31 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 32 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 33 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 34 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 35 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 36 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 37 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 38 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 39 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 40 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 41 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 42 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 43 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 44 uniform sort. 
Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 45 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 46 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 47 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 48 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 49 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 50 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 51 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 52 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 53 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 54 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 55 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 56 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 57 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 58 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 59 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 60 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 61 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 62 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 63 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 64 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 65 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 66 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 67 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 68 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 69 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 70 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 71 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 72 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 73 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 74 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 75 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 76 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 77 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 78 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 79 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 80 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 81 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 82 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 83 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 84 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 85 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 86 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 87 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. 
+ Bucket 88 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 89 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 90 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 91 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 92 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 93 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 94 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 95 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 96 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 97 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 98 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 99 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 100 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 101 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 102 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 103 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 104 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 105 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 106 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 107 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 108 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 109 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 110 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 111 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 112 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 113 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 114 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 115 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 116 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 117 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 118 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 119 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 120 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 121 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 122 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 123 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 124 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 125 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 126 uniform sort. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. + Bucket 127 QS. Ram: 3.840GiB, u_sort min: 0.750GiB, qs min: 0.375GiB. 
force_qs: 1
+	Finished writing C1 and C3 tables
+	Writing C2 table
+	Finished writing C2 table
+	Final table pointers:
+	P1: 0x10c
+	P2: 0x3747866ce
+	P3: 0x6b5e02770
+	P4: 0x9fd989c25
+	P5: 0xd5574d320
+	P6: 0x10d8fa9cb3
+	P7: 0x14e91f0332
+	C1: 0x1908a19632
+	C2: 0x1908bbc9fe
+	C3: 0x1908bbcaae
+Time for phase 4 = 924.288 seconds. CPU (86.810%) Mon Apr 5 06:06:35 2021
+Approximate working space used (without final file): 286.598 GiB
+Final File size: 101.336 GiB
+Total time = 39945.080 seconds. CPU (123.100%) Mon Apr 5 06:06:35 2021
+Copied final file from "/farm/yards/901/plot-k32-2021-04-04-19-00-3eb8a37981de1cc76187a36ed947ab4307943cf92967a7e166841186c7899e24.plot.2.tmp" to "/farm/wagons/801/plot-k32-2021-04-04-19-00-3eb8a37981de1cc76187a36ed947ab4307943cf92967a7e166841186c7899e24.plot.2.tmp"
+Copy time = 501.696 seconds. CPU (23.860%) Sun May 9 22:52:41 2021
+Removed temp2 file "/farm/yards/901/plot-k32-2021-04-04-19-00-3eb8a37981de1cc76187a36ed947ab4307943cf92967a7e166841186c7899e24.plot.2.tmp"? 1
+Renamed final file from "/farm/wagons/801/plot-k32-2021-04-04-19-00-3eb8a37981de1cc76187a36ed947ab4307943cf92967a7e166841186c7899e24.plot.2.tmp" to "/farm/wagons/801/plot-k32-2021-04-04-19-00-3eb8a37981de1cc76187a36ed947ab4307943cf92967a7e166841186c7899e24.plot"
+06:22:40.715 src.plotting.create_plots       : INFO     Summary:
+06:22:40.715 src.plotting.create_plots       : INFO     Created a total of 1 new plots
+06:22:40.715 src.plotting.create_plots       : INFO     plot-k32-2021-04-04-19-00-3eb8a37981de1cc76187a36ed947ab4307943cf92967a7e166841186c7899e24.plot
diff --git a/src/plotman/_tests/resources/2021-04-04T19_00_47.681088-0400.notes b/src/plotman/_tests/resources/2021-04-04T19_00_47.681088-0400.notes
new file mode 100644
index 00000000..0f12a7ad
--- /dev/null
+++ b/src/plotman/_tests/resources/2021-04-04T19_00_47.681088-0400.notes
@@ -0,0 +1,8 @@
+This sample log file was created from a real log file. It has had the
+following values replaced by naively randomly generated hexadecimal
+characters.
+
+pool public key
+farmer public key
+memo
+id
diff --git a/src/plotman/_tests/resources/__init__.py b/src/plotman/_tests/resources/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/plotman/analyzer.py b/src/plotman/analyzer.py
new file mode 100644
index 00000000..5cdbaa3e
--- /dev/null
+++ b/src/plotman/analyzer.py
@@ -0,0 +1,185 @@
+import datetime
+import os
+import re
+import shutil
+import statistics
+import sys
+import time
+import typing
+
+import texttable as tt
+import numpy as np
+
+import matplotlib
+import matplotlib.pyplot as plt
+
+from plotman import plot_util
+# The create_ax_* plotting helpers are defined in figure.py, added earlier in
+# this patch; the exact import path is an assumption and may change as the
+# series is refactored.
+from figure import (
+    create_ax_dumbbell,
+    create_ax_plotcumulative,
+    create_ax_plotrate,
+    create_ax_plottime,
+)
+
+
+def analyze(logfilenames: typing.List[str], clipterminals: bool, bytmp: bool, bybitfield: bool, figfile: typing.Optional[str] = None) -> None:
+    # figfile is optional; when set, a summary figure is saved to that path.
+    data: typing.Dict[str, typing.Dict[str, typing.List[float]]] = {}
+    for logfilename in logfilenames:
+        with open(logfilename, 'r') as f:
+            # Record of slicing and data associated with the slice
+            sl = 'x'  # Slice key
+            phase_time: typing.Dict[str, float] = {}  # Map from phase index to time
+            n_sorts = 0
+            n_uniform = 0
+            is_first_last = False
+
+            # Read the logfile, triggering various behaviors on various
+            # regex matches.
+            for line in f:
+                # Beginning of plot job.  We may encounter this multiple
+                # times, if a job was run with -n > 1.
+                # Sample log line:
+                # 2021-04-08T13:33:43.542 chia.plotting.create_plots       : INFO     Starting plot 1/5
+                m = re.search(r'Starting plot (\d*)/(\d*)', line)
+                if m:
+                    # (re)-initialize data structures
+                    sl = 'x'  # Slice key
+                    phase_time = {}  # Map from phase index to time
+                    n_sorts = 0
+                    n_uniform = 0
+
+                    seq_num = int(m.group(1))
+                    seq_total = int(m.group(2))
+                    is_first_last = seq_num == 1 or seq_num == seq_total
+
+                # Temp dirs.  Sample log line:
+                # Starting plotting progress into temporary dirs: /mnt/tmp/01 and /mnt/tmp/a
+                m = re.search(r'^Starting plotting.*dirs: (.*) and (.*)', line)
+                if m:
+                    # Record tmpdir, if slicing by it
+                    if bytmp:
+                        tmpdir = m.group(1)
+                        sl += '-' + tmpdir
+
+                # Bitfield marker.  Sample log line(s):
+                # Starting phase 2/4: Backpropagation without bitfield into tmp files... Mon Mar 1 03:56:11 2021
+                # or
+                # Starting phase 2/4: Backpropagation into tmp files... Fri Apr 2 03:17:32 2021
+                m = re.search(r'^Starting phase 2/4: Backpropagation', line)
+                if bybitfield and m:
+                    if 'without bitfield' in line:
+                        sl += '-nobitfield'
+                    else:
+                        sl += '-bitfield'
+
+                # Phase timing.  Sample log line:
+                # Time for phase 1 = 22796.7 seconds. CPU (98%) Tue Sep 29 17:57:19 2020
+                for phase in ['1', '2', '3', '4']:
+                    m = re.search(r'^Time for phase ' + phase + r' = (\d+\.\d+) seconds\..*', line)
+                    if m:
+                        phase_time[phase] = float(m.group(1))
+
+                # Uniform sort.  Sample log line:
+                # Bucket 267 uniform sort. Ram: 0.920GiB, u_sort min: 0.688GiB, qs min: 0.172GiB.
+                # or
+                # ....?....
+                # or
+                # Bucket 511 QS. Ram: 0.920GiB, u_sort min: 0.375GiB, qs min: 0.094GiB. force_qs: 1
+                m = re.search(r'Bucket \d+ ([^\.]+)\..*', line)
+                if m and 'force_qs' not in line:
+                    sorter = m.group(1)
+                    n_sorts += 1
+                    if sorter == 'uniform sort':
+                        n_uniform += 1
+                    elif sorter == 'QS':
+                        pass
+                    else:
+                        print('Warning: unrecognized sort ' + sorter)
+
+                # Job completion.  Record total time in sliced data store.
+                # Sample log line:
+                # Total time = 49487.1 seconds. CPU (97.26%) Wed Sep 30 01:22:10 2020
+                m = re.search(r'^Total time = (\d+\.\d+) seconds.*', line)
+                if m:
+                    if clipterminals and is_first_last:
+                        pass  # Drop this data; omit from statistics.
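+                    # (Illustrative note, not from the original code: for a
+                    # job run with -n 5, this drops plots 1/5 and 5/5, whose
+                    # timings are often atypical for the batch.)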
+                    else:
+                        data.setdefault(sl, {}).setdefault('total time', []).append(float(m.group(1)))
+                        for phase in ['1', '2', '3', '4']:
+                            data.setdefault(sl, {}).setdefault('phase ' + phase, []).append(phase_time[phase])
+                        # Guard against logs with no bucket lines so we never
+                        # divide by zero.
+                        usort_pct = (100 * n_uniform // n_sorts) if n_sorts else 0
+                        data.setdefault(sl, {}).setdefault('%usort', []).append(usort_pct)
+
+                        # Grab the time ended, compute the time started.  The
+                        # wall-clock stamp is everything after the final ')'
+                        # on the Total time line.
+                        time_ended = time.mktime(datetime.datetime.strptime(line.split(')')[-1][1:-1], '%a %b %d %H:%M:%S %Y').timetuple())
+                        data.setdefault(sl, {}).setdefault('time ended', []).append(time_ended)
+                        data.setdefault(sl, {}).setdefault('time started', []).append(time_ended - float(m.group(1)))
+
+    if figfile is not None:
+        # Prepare figure
+        for sl in data.keys():
+
+            # This array will hold start and end data (in hours)
+            data_started_ended = np.array([[ts, te, te - ts] for
+                ts, te in zip(data[sl]['time started'], data[sl]['time ended'])
+            ]) / (60 * 60)
+            assert data_started_ended.shape[0] >= 3, 'Cannot generate figure with fewer than 3 datapoints ({} datapoints passed)'.format(data_started_ended.shape[0])
+
+            # Shift the data so that it starts at zero
+            data_started_ended -= np.min(data_started_ended[:, 0])
+
+            # Sort the rows by start time
+            data_started_ended = data_started_ended[np.argsort(data_started_ended[:, 0])]
+
+            # Create figure
+            num_plots = 4
+            f = plt.figure(figsize=(8, 12))
+            ax = plt.subplot(num_plots, 1, 1)
+            ax.set_title('Plot performance summary')
+
+            create_ax_dumbbell(ax, data_started_ended)
+
+            ax = plt.subplot(num_plots, 1, 2)
+            create_ax_plotrate(ax, data_started_ended, end=True, window=3)
+
+            ax = plt.subplot(num_plots, 1, 3)
+            create_ax_plottime(ax, data_started_ended, window=3)
+
+            ax = plt.subplot(num_plots, 1, 4)
+            create_ax_plotcumulative(ax, data_started_ended)
+
+            print('Saving analysis figure to {}'.format(figfile))
+            ax.set_xlabel('Time (hours)')
+            f.savefig(figfile)
+
+    # Prepare report
+    tab = tt.Texttable()
+    all_measures = ['%usort', 'phase 1', 'phase 2', 'phase 3', 'phase 4', 'total time']
+    headings = ['Slice', 'n'] + all_measures
+    tab.header(headings)
+
+    for sl in data.keys():
+        row = [sl]
+
+        # Sample size
+        sample_sizes = []
+        for measure in all_measures:
+            values = data.get(sl, {}).get(measure, [])
+            sample_sizes.append(len(values))
+        sample_size_lower_bound = min(sample_sizes)
+        sample_size_upper_bound = max(sample_sizes)
+        if sample_size_lower_bound == sample_size_upper_bound:
+            row.append('%d' % sample_size_lower_bound)
+        else:
+            row.append('%d-%d' % (sample_size_lower_bound, sample_size_upper_bound))
+
+        # Phase timings
+        for measure in all_measures:
+            values = data.get(sl, {}).get(measure, [])
+            if len(values) > 1:
+                row.append('μ=%s σ=%s' % (
+                    plot_util.human_format(statistics.mean(values), 1),
+                    plot_util.human_format(statistics.stdev(values), 0)
+                ))
+            elif len(values) == 1:
+                row.append(plot_util.human_format(values[0], 1))
+            else:
+                row.append('N/A')
+
+        tab.add_row(row)
+
+    # 'stty size' only works on POSIX ttys; shutil is portable.
+    columns = shutil.get_terminal_size().columns
+    tab.set_max_width(columns)
+    s = tab.draw()
+    print(s)
+
+
diff --git a/src/plotman/archive.py b/src/plotman/archive.py
new file mode 100644
index 00000000..13c36ddd
--- /dev/null
+++ b/src/plotman/archive.py
@@ -0,0 +1,258 @@
+import argparse
+import contextlib
+import logging
+import math
+import os
+import posixpath
+import random
+import re
+import subprocess
+import sys
+import typing
+from datetime import datetime
+
+import pendulum
+import psutil
+import texttable as tt
+
+from plotman import configuration, job, manager, plot_util
+
+
+logger = logging.getLogger(__name__)
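+
+# Overview of this module (summary added for orientation; see the function
+# bodies below): spawn_archive_process() asks archive() for a transfer
+# command, launches it with subprocess, and uses get_running_archive_jobs()
+# to avoid starting a second transfer while one is still running.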
logging.getLogger(__name__) + +_WINDOWS = sys.platform == 'win32' + +# TODO : write-protect and delete-protect archived plots + +def spawn_archive_process(dir_cfg: configuration.Directories, arch_cfg: configuration.Archiving, log_cfg: configuration.Logging, all_jobs: typing.List[job.Job]) -> typing.Tuple[typing.Union[bool, str, typing.Dict[str, object]], typing.List[str]]: + '''Spawns a new archive process using the command created + in the archive() function. Returns archiving status and a log message to print.''' + + log_messages = [] + archiving_status = None + + # Look for running archive jobs. Be robust to finding more than one + # even though the scheduler should only run one at a time. + arch_jobs: typing.List[typing.Union[int, str]] = [*get_running_archive_jobs(arch_cfg)] + + if not arch_jobs: + (should_start, status_or_cmd, archive_log_messages) = archive(dir_cfg, arch_cfg, all_jobs) + log_messages.extend(archive_log_messages) + if not should_start: + archiving_status = status_or_cmd + else: + args: typing.Dict[str, object] = status_or_cmd # type: ignore[assignment] + + log_file_path = log_cfg.create_transfer_log_path(time=pendulum.now()) + + log_messages.append(f'Starting archive: {args["args"]} ; logging to {log_file_path}') + # TODO: CAMPid 09840103109429840981397487498131 + try: + open_log_file = open(log_file_path, 'x') + except FileExistsError: + log_messages.append( + f'Archiving log file already exists, skipping attempt to start a' + f' new archive transfer: {log_file_path!r}' + ) + return (False, log_messages) + except FileNotFoundError as e: + message = ( + f'Unable to open log file. Verify that the directory exists' + f' and has proper write permissions: {log_file_path!r}' + ) + raise Exception(message) from e + + # Preferably, do not add any code between the try block above + # and the with block below. IOW, this space intentionally left + # blank... As is, this provides a good chance that our handle + # of the log file will get closed explicitly while still + # allowing handling of just the log file opening error. + + if sys.platform == 'win32': + creationflags = subprocess.CREATE_NO_WINDOW + else: + creationflags = 0 + + with open_log_file: + # start_new_sessions to make the job independent of this controlling tty. + p = subprocess.Popen(**args, # type: ignore[call-overload] + shell=True, + stdout=open_log_file, + stderr=subprocess.STDOUT, + start_new_session=True, + creationflags=creationflags) + # At least for now it seems that even if we get a new running + # archive jobs list it doesn't contain the new rsync process. + # My guess is that this is because the bash in the middle due to + # shell=True is still starting up and really hasn't launched the + # new rsync process yet. So, just put a placeholder here. It + # will get filled on the next cycle. + arch_jobs.append('') + + if archiving_status is None: + archiving_status = 'pid: ' + ', '.join(map(str, arch_jobs)) + + return archiving_status, log_messages + +def compute_priority(phase: job.Phase, gb_free: float, n_plots: int) -> int: + # All these values are designed around dst buffer dirs of about + # ~2TB size and containing k32 plots. TODO: Generalize, and + # rewrite as a sort function. + + priority = 50 + + # To avoid concurrent IO, we should not touch drives that + # are about to receive a new plot. If we don't know the phase, + # ignore. 
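+ # Worked example with made-up numbers: a dst whose furthest job is in
+ # phase 3:6, with 400 GB free and 5 plots already present:
+ #   50 - 16                            (phase 3:6 will deliver soon)
+ #      + 1 + int((1000 - 400) / 100)   (+7, drive getting full)
+ #      + 1 + int((500 - 400) / 100)    (+2, drive nearly full)
+ #      + 5                             (plot count)
+ #   = 48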
+ if (phase.known): + if (phase == job.Phase(3, 4)): + priority -= 4 + elif (phase == job.Phase(3, 5)): + priority -= 8 + elif (phase == job.Phase(3, 6)): + priority -= 16 + elif (phase >= job.Phase(3, 7)): + priority -= 32 + + # If a drive is getting full, we should prioritize it + if (gb_free < 1000): + priority += 1 + int((1000 - gb_free) / 100) + if (gb_free < 500): + priority += 1 + int((500 - gb_free) / 100) + + # Finally, least importantly, pick drives with more plots + # over those with fewer. + priority += n_plots + + return priority + +def get_archdir_freebytes(arch_cfg: configuration.Archiving) -> typing.Tuple[typing.Dict[str, int], typing.List[str]]: + log_messages = [] + target = arch_cfg.target_definition() + + archdir_freebytes = {} + timeout = 5 + try: + completed_process = subprocess.run( + [target.disk_space_path], # type: ignore[list-item] + env={**os.environ, **arch_cfg.environment()}, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + timeout=timeout, + ) + except subprocess.TimeoutExpired as e: + log_messages.append(f'Disk space check timed out in {timeout} seconds') + if e.stdout is None: + stdout = '' + else: + stdout = e.stdout.decode('utf-8', errors='ignore').strip() + if e.stderr is None: + stderr = '' + else: + stderr = e.stderr.decode('utf-8', errors='ignore').strip() + else: + stdout = completed_process.stdout.decode('utf-8', errors='ignore').strip() + stderr = completed_process.stderr.decode('utf-8', errors='ignore').strip() + for line in stdout.splitlines(): + line = line.strip() + split = line.split(':') + if len(split) != 2: + log_messages.append(f'Unable to parse disk script line: {line!r}') + continue + archdir, space = split + freebytes = int(space) + archdir_freebytes[archdir.strip()] = freebytes + + for line in log_messages: + logger.info(line) + + logger.info('stdout from disk space script:') + for line in stdout.splitlines(): + logger.info(f' {line}') + + logger.info('stderr from disk space script:') + for line in stderr.splitlines(): + logger.info(f' {line}') + + return archdir_freebytes, log_messages + +# TODO: maybe consolidate with similar code in job.py? +def get_running_archive_jobs(arch_cfg: configuration.Archiving) -> typing.List[int]: + '''Look for running rsync jobs that seem to match the pattern we use for archiving + them. Return a list of PIDs of matching jobs.''' + jobs = [] + target = arch_cfg.target_definition() + variables = {**os.environ, **arch_cfg.environment()} + dest = target.transfer_process_argument_prefix.format(**variables) + proc_name = target.transfer_process_name.format(**variables) + for proc in psutil.process_iter(): + with contextlib.suppress(psutil.NoSuchProcess): + with proc.oneshot(): + if proc.name() == proc_name: + args = proc.cmdline() + for arg in args: + if arg.startswith(dest): + jobs.append(proc.pid) + return jobs + +def archive(dir_cfg: configuration.Directories, arch_cfg: configuration.Archiving, all_jobs: typing.List[job.Job]) -> typing.Tuple[bool, typing.Optional[typing.Union[typing.Dict[str, object], str]], typing.List[str]]: + '''Configure one archive job. Needs to know all jobs so it can avoid IO + contention on the plotting dstdir drives. 
Returns either (False, ) + if we should not execute an archive job or (True, ) with the archive + command if we should.''' + log_messages: typing.List[str] = [] + if arch_cfg is None: + return (False, "No 'archive' settings declared in plotman.yaml", log_messages) + + dir2ph = manager.dstdirs_to_furthest_phase(all_jobs) + best_priority = -100000000 + chosen_plot = None + dst_dir = dir_cfg.get_dst_directories() + for d in dst_dir: + ph = dir2ph.get(d, job.Phase(0, 0)) + dir_plots = plot_util.list_k32_plots(d) + gb_free = plot_util.df_b(d) / plot_util.GB + n_plots = len(dir_plots) + priority = compute_priority(ph, gb_free, n_plots) + if priority >= best_priority and dir_plots: + best_priority = priority + chosen_plot = dir_plots[0] + + if not chosen_plot: + return (False, 'No plots found', log_messages) + + # TODO: sanity check that archive machine is available + # TODO: filter drives mounted RO + + # + # Pick first archive dir with sufficient space + # + archdir_freebytes, freebytes_log_messages = get_archdir_freebytes(arch_cfg) + log_messages.extend(freebytes_log_messages) + if not archdir_freebytes: + return(False, 'No free archive dirs found.', log_messages) + + archdir = '' + chosen_plot_size = os.stat(chosen_plot).st_size + # 10MB is big enough to outsize filesystem block sizes hopefully, but small + # enough to make this a pretty tight corner for people to get stuck in. + free_space_margin = 10_000_000 + available = [(d, space) for (d, space) in archdir_freebytes.items() if + space > (chosen_plot_size + free_space_margin)] + if len(available) > 0: + index = min(arch_cfg.index, len(available) - 1) + (archdir, freespace) = sorted(available)[index] + + if not archdir: + return(False, 'No archive directories found with enough free space', log_messages) + + env = arch_cfg.environment( + source=chosen_plot, + destination=archdir, + ) + subprocess_arguments: typing.Dict[str, object] = { + 'args': arch_cfg.target_definition().transfer_path, + 'env': {**os.environ, **env} + } + + return (True, subprocess_arguments, log_messages) diff --git a/src/plotman/chia.py b/src/plotman/chia.py new file mode 100644 index 00000000..7a2c7d0c --- /dev/null +++ b/src/plotman/chia.py @@ -0,0 +1,371 @@ +# mypy: allow_untyped_decorators + +import functools +import typing + +import click +from pathlib import Path +import typing_extensions + + +class CommandProtocol(typing_extensions.Protocol): + def make_context(self, info_name: str, args: typing.List[str]) -> click.Context: + ... + + def __call__(self) -> None: + ... 
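+# CommandProtocol is a structural (duck) type: anything offering compatible
+# make_context() and __call__() satisfies it, with no inheritance required.
+# A click.Command provides both, which is what the version registrations
+# below depend on. A minimal sketch (the name 'example' is illustrative):
+#
+#     @click.command()
+#     def example() -> None:
+#         pass
+#
+#     checked: CommandProtocol = example  # passes structural checking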
+ + +class Commands: + def __init__(self) -> None: + self.by_version: typing.Dict[typing.Sequence[int], CommandProtocol] = {} + + def register(self, version: typing.Sequence[int]) -> typing.Callable[[CommandProtocol], None]: + if version in self.by_version: + raise Exception(f'Version already registered: {version!r}') + if not isinstance(version, tuple): + raise Exception(f'Version must be a tuple: {version!r}') + + return functools.partial(self._decorator, version=version) + + def _decorator(self, command: CommandProtocol, *, version: typing.Sequence[int]) -> None: + self.by_version[version] = command + # self.by_version = dict(sorted(self.by_version.items())) + + def __getitem__(self, item: typing.Sequence[int]) -> typing.Callable[[], None]: + return self.by_version[item] + + def latest_command(self) -> CommandProtocol: + return max(self.by_version.items())[1] + + +commands = Commands() + +@commands.register(version=(1, 1, 2)) +@click.command() +# https://github.com/Chia-Network/chia-blockchain/blob/1.1.2/LICENSE +# https://github.com/Chia-Network/chia-blockchain/blob/1.1.2/chia/cmds/plots.py#L39-L83 +# start copied code +@click.option("-k", "--size", help="Plot size", type=int, default=32, show_default=True) +@click.option("--override-k", help="Force size smaller than 32", default=False, show_default=True, is_flag=True) +@click.option("-n", "--num", help="Number of plots or challenges", type=int, default=1, show_default=True) +@click.option("-b", "--buffer", help="Megabytes for sort/plot buffer", type=int, default=4608, show_default=True) +@click.option("-r", "--num_threads", help="Number of threads to use", type=int, default=2, show_default=True) +@click.option("-u", "--buckets", help="Number of buckets", type=int, default=128, show_default=True) +@click.option( + "-a", + "--alt_fingerprint", + type=int, + default=None, + help="Enter the alternative fingerprint of the key you want to use", +) +@click.option( + "-c", + "--pool_contract_address", + type=str, + default=None, + help="Address of where the pool reward will be sent to. 
Only used if alt_fingerprint and pool public key are None", +) +@click.option("-f", "--farmer_public_key", help="Hex farmer public key", type=str, default=None) +@click.option("-p", "--pool_public_key", help="Hex public key of pool", type=str, default=None) +@click.option( + "-t", + "--tmp_dir", + help="Temporary directory for plotting files", + type=click.Path(), + default=Path("."), + show_default=True, +) +@click.option("-2", "--tmp2_dir", help="Second temporary directory for plotting files", type=click.Path(), default=None) +@click.option( + "-d", + "--final_dir", + help="Final directory for plots (relative or absolute)", + type=click.Path(), + default=Path("."), + show_default=True, +) +@click.option("-i", "--plotid", help="PlotID in hex for reproducing plots (debugging only)", type=str, default=None) +@click.option("-m", "--memo", help="Memo in hex for reproducing plots (debugging only)", type=str, default=None) +@click.option("-e", "--nobitfield", help="Disable bitfield", default=False, is_flag=True) +@click.option( + "-x", "--exclude_final_dir", help="Skips adding [final dir] to harvester for farming", default=False, is_flag=True +) +# end copied code +def _cli_1_1_2() -> None: + pass + + +@commands.register(version=(1, 1, 3)) +@click.command() +# https://github.com/Chia-Network/chia-blockchain/blob/1.1.3/LICENSE +# https://github.com/Chia-Network/chia-blockchain/blob/1.1.3/chia/cmds/plots.py#L39-L83 +# start copied code +@click.option("-k", "--size", help="Plot size", type=int, default=32, show_default=True) +@click.option("--override-k", help="Force size smaller than 32", default=False, show_default=True, is_flag=True) +@click.option("-n", "--num", help="Number of plots or challenges", type=int, default=1, show_default=True) +@click.option("-b", "--buffer", help="Megabytes for sort/plot buffer", type=int, default=4608, show_default=True) +@click.option("-r", "--num_threads", help="Number of threads to use", type=int, default=2, show_default=True) +@click.option("-u", "--buckets", help="Number of buckets", type=int, default=128, show_default=True) +@click.option( + "-a", + "--alt_fingerprint", + type=int, + default=None, + help="Enter the alternative fingerprint of the key you want to use", +) +@click.option( + "-c", + "--pool_contract_address", + type=str, + default=None, + help="Address of where the pool reward will be sent to. 
Only used if alt_fingerprint and pool public key are None", +) +@click.option("-f", "--farmer_public_key", help="Hex farmer public key", type=str, default=None) +@click.option("-p", "--pool_public_key", help="Hex public key of pool", type=str, default=None) +@click.option( + "-t", + "--tmp_dir", + help="Temporary directory for plotting files", + type=click.Path(), + default=Path("."), + show_default=True, +) +@click.option("-2", "--tmp2_dir", help="Second temporary directory for plotting files", type=click.Path(), default=None) +@click.option( + "-d", + "--final_dir", + help="Final directory for plots (relative or absolute)", + type=click.Path(), + default=Path("."), + show_default=True, +) +@click.option("-i", "--plotid", help="PlotID in hex for reproducing plots (debugging only)", type=str, default=None) +@click.option("-m", "--memo", help="Memo in hex for reproducing plots (debugging only)", type=str, default=None) +@click.option("-e", "--nobitfield", help="Disable bitfield", default=False, is_flag=True) +@click.option( + "-x", "--exclude_final_dir", help="Skips adding [final dir] to harvester for farming", default=False, is_flag=True +) +# end copied code +def _cli_1_1_3() -> None: + pass + + +@commands.register(version=(1, 1, 4)) +@click.command() +# https://github.com/Chia-Network/chia-blockchain/blob/1.1.4/LICENSE +# https://github.com/Chia-Network/chia-blockchain/blob/1.1.4/chia/cmds/plots.py#L39-L83 +# start copied code +@click.option("-k", "--size", help="Plot size", type=int, default=32, show_default=True) +@click.option("--override-k", help="Force size smaller than 32", default=False, show_default=True, is_flag=True) +@click.option("-n", "--num", help="Number of plots or challenges", type=int, default=1, show_default=True) +@click.option("-b", "--buffer", help="Megabytes for sort/plot buffer", type=int, default=3389, show_default=True) +@click.option("-r", "--num_threads", help="Number of threads to use", type=int, default=2, show_default=True) +@click.option("-u", "--buckets", help="Number of buckets", type=int, default=128, show_default=True) +@click.option( + "-a", + "--alt_fingerprint", + type=int, + default=None, + help="Enter the alternative fingerprint of the key you want to use", +) +@click.option( + "-c", + "--pool_contract_address", + type=str, + default=None, + help="Address of where the pool reward will be sent to. 
Only used if alt_fingerprint and pool public key are None", +) +@click.option("-f", "--farmer_public_key", help="Hex farmer public key", type=str, default=None) +@click.option("-p", "--pool_public_key", help="Hex public key of pool", type=str, default=None) +@click.option( + "-t", + "--tmp_dir", + help="Temporary directory for plotting files", + type=click.Path(), + default=Path("."), + show_default=True, +) +@click.option("-2", "--tmp2_dir", help="Second temporary directory for plotting files", type=click.Path(), default=None) +@click.option( + "-d", + "--final_dir", + help="Final directory for plots (relative or absolute)", + type=click.Path(), + default=Path("."), + show_default=True, +) +@click.option("-i", "--plotid", help="PlotID in hex for reproducing plots (debugging only)", type=str, default=None) +@click.option("-m", "--memo", help="Memo in hex for reproducing plots (debugging only)", type=str, default=None) +@click.option("-e", "--nobitfield", help="Disable bitfield", default=False, is_flag=True) +@click.option( + "-x", "--exclude_final_dir", help="Skips adding [final dir] to harvester for farming", default=False, is_flag=True +) +# end copied code +def _cli_1_1_4() -> None: + pass + + +@commands.register(version=(1, 1, 5)) +@click.command() +# https://github.com/Chia-Network/chia-blockchain/blob/1.1.5/LICENSE +# https://github.com/Chia-Network/chia-blockchain/blob/1.1.5/chia/cmds/plots.py#L39-L83 +# start copied code +@click.option("-k", "--size", help="Plot size", type=int, default=32, show_default=True) +@click.option("--override-k", help="Force size smaller than 32", default=False, show_default=True, is_flag=True) +@click.option("-n", "--num", help="Number of plots or challenges", type=int, default=1, show_default=True) +@click.option("-b", "--buffer", help="Megabytes for sort/plot buffer", type=int, default=3389, show_default=True) +@click.option("-r", "--num_threads", help="Number of threads to use", type=int, default=2, show_default=True) +@click.option("-u", "--buckets", help="Number of buckets", type=int, default=128, show_default=True) +@click.option( + "-a", + "--alt_fingerprint", + type=int, + default=None, + help="Enter the alternative fingerprint of the key you want to use", +) +@click.option( + "-c", + "--pool_contract_address", + type=str, + default=None, + help="Address of where the pool reward will be sent to. 
Only used if alt_fingerprint and pool public key are None", +) +@click.option("-f", "--farmer_public_key", help="Hex farmer public key", type=str, default=None) +@click.option("-p", "--pool_public_key", help="Hex public key of pool", type=str, default=None) +@click.option( + "-t", + "--tmp_dir", + help="Temporary directory for plotting files", + type=click.Path(), + default=Path("."), + show_default=True, +) +@click.option("-2", "--tmp2_dir", help="Second temporary directory for plotting files", type=click.Path(), default=None) +@click.option( + "-d", + "--final_dir", + help="Final directory for plots (relative or absolute)", + type=click.Path(), + default=Path("."), + show_default=True, +) +@click.option("-i", "--plotid", help="PlotID in hex for reproducing plots (debugging only)", type=str, default=None) +@click.option("-m", "--memo", help="Memo in hex for reproducing plots (debugging only)", type=str, default=None) +@click.option("-e", "--nobitfield", help="Disable bitfield", default=False, is_flag=True) +@click.option( + "-x", "--exclude_final_dir", help="Skips adding [final dir] to harvester for farming", default=False, is_flag=True +) +# end copied code +def _cli_1_1_5() -> None: + pass + + +@commands.register(version=(1, 1, 6)) +@click.command() +# https://github.com/Chia-Network/chia-blockchain/blob/1.1.6/LICENSE +# https://github.com/Chia-Network/chia-blockchain/blob/1.1.6/chia/cmds/plots.py#L39-L83 +# start copied code +@click.option("-k", "--size", help="Plot size", type=int, default=32, show_default=True) +@click.option("--override-k", help="Force size smaller than 32", default=False, show_default=True, is_flag=True) +@click.option("-n", "--num", help="Number of plots or challenges", type=int, default=1, show_default=True) +@click.option("-b", "--buffer", help="Megabytes for sort/plot buffer", type=int, default=3389, show_default=True) +@click.option("-r", "--num_threads", help="Number of threads to use", type=int, default=2, show_default=True) +@click.option("-u", "--buckets", help="Number of buckets", type=int, default=128, show_default=True) +@click.option( + "-a", + "--alt_fingerprint", + type=int, + default=None, + help="Enter the alternative fingerprint of the key you want to use", +) +@click.option( + "-c", + "--pool_contract_address", + type=str, + default=None, + help="Address of where the pool reward will be sent to. 
Only used if alt_fingerprint and pool public key are None", +) +@click.option("-f", "--farmer_public_key", help="Hex farmer public key", type=str, default=None) +@click.option("-p", "--pool_public_key", help="Hex public key of pool", type=str, default=None) +@click.option( + "-t", + "--tmp_dir", + help="Temporary directory for plotting files", + type=click.Path(), + default=Path("."), + show_default=True, +) +@click.option("-2", "--tmp2_dir", help="Second temporary directory for plotting files", type=click.Path(), default=None) +@click.option( + "-d", + "--final_dir", + help="Final directory for plots (relative or absolute)", + type=click.Path(), + default=Path("."), + show_default=True, +) +@click.option("-i", "--plotid", help="PlotID in hex for reproducing plots (debugging only)", type=str, default=None) +@click.option("-m", "--memo", help="Memo in hex for reproducing plots (debugging only)", type=str, default=None) +@click.option("-e", "--nobitfield", help="Disable bitfield", default=False, is_flag=True) +@click.option( + "-x", "--exclude_final_dir", help="Skips adding [final dir] to harvester for farming", default=False, is_flag=True +) +# end copied code +def _cli_1_1_6() -> None: + pass + + +@commands.register(version=(1, 1, 7)) +@click.command() +# https://github.com/Chia-Network/chia-blockchain/blob/1.1.7/LICENSE +# https://github.com/Chia-Network/chia-blockchain/blob/1.1.7/chia/cmds/plots.py#L39-L83 +# start copied code +@click.option("-k", "--size", help="Plot size", type=int, default=32, show_default=True) +@click.option("--override-k", help="Force size smaller than 32", default=False, show_default=True, is_flag=True) +@click.option("-n", "--num", help="Number of plots or challenges", type=int, default=1, show_default=True) +@click.option("-b", "--buffer", help="Megabytes for sort/plot buffer", type=int, default=3389, show_default=True) +@click.option("-r", "--num_threads", help="Number of threads to use", type=int, default=2, show_default=True) +@click.option("-u", "--buckets", help="Number of buckets", type=int, default=128, show_default=True) +@click.option( + "-a", + "--alt_fingerprint", + type=int, + default=None, + help="Enter the alternative fingerprint of the key you want to use", +) +@click.option( + "-c", + "--pool_contract_address", + type=str, + default=None, + help="Address of where the pool reward will be sent to. 
Only used if alt_fingerprint and pool public key are None", +) +@click.option("-f", "--farmer_public_key", help="Hex farmer public key", type=str, default=None) +@click.option("-p", "--pool_public_key", help="Hex public key of pool", type=str, default=None) +@click.option( + "-t", + "--tmp_dir", + help="Temporary directory for plotting files", + type=click.Path(), + default=Path("."), + show_default=True, +) +@click.option("-2", "--tmp2_dir", help="Second temporary directory for plotting files", type=click.Path(), default=None) +@click.option( + "-d", + "--final_dir", + help="Final directory for plots (relative or absolute)", + type=click.Path(), + default=Path("."), + show_default=True, +) +@click.option("-i", "--plotid", help="PlotID in hex for reproducing plots (debugging only)", type=str, default=None) +@click.option("-m", "--memo", help="Memo in hex for reproducing plots (debugging only)", type=str, default=None) +@click.option("-e", "--nobitfield", help="Disable bitfield", default=False, is_flag=True) +@click.option( + "-x", "--exclude_final_dir", help="Skips adding [final dir] to harvester for farming", default=False, is_flag=True +) +# end copied code +def _cli_1_1_7() -> None: + pass diff --git a/src/plotman/chiapos.py b/src/plotman/chiapos.py new file mode 100644 index 00000000..6fb55f34 --- /dev/null +++ b/src/plotman/chiapos.py @@ -0,0 +1,87 @@ +# version = 1.0.2 +# https://github.com/Chia-Network/chiapos/blob/1.0.2/LICENSE +# https://github.com/Chia-Network/chiapos/blob/1.0.2/src/pos_constants.hpp +# start ported code +# Unique plot id which will be used as a ChaCha8 key, and determines the PoSpace. +kIdLen = 32; + +# Distance between matching entries is stored in the offset +kOffsetSize = 10; + +# Max matches a single entry can have, used for hardcoded memory allocation +kMaxMatchesSingleEntry = 30; +kMinBuckets = 16; +kMaxBuckets = 128; + +# During backprop and compress, the write pointer is ahead of the read pointer +# Note that the large the offset, the higher these values must be +kReadMinusWrite = 1 << kOffsetSize; +kCachedPositionsSize = kReadMinusWrite * 4; + +# Must be set high enough to prevent attacks of fast plotting +kMinPlotSize = 18; + +# Set to 50 since k + kExtraBits + k*4 must not exceed 256 (BLAKE3 output size) +kMaxPlotSize = 50; + +# The amount of spare space used for sort on disk (multiplied time memory buffer size) +kSpareMultiplier = 5; + +# The proportion of memory to allocate to the Sort Manager for reading in buckets and sorting them +# The lower this number, the more memory must be provided by the caller. However, lowering the +# number also allows a higher proportion for writing, which reduces seeks for HDD. 
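+# As a rough illustration (buffer size chosen arbitrarily): with a 4 GiB
+# sort memory budget, the 0.75 proportion below would leave ~3 GiB for
+# reading buckets and ~1 GiB for the write side.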
+kMemSortProportion = 0.75; +kMemSortProportionLinePoint = 0.85; + +# How many f7s per C1 entry, and how many C1 entries per C2 entry +kCheckpoint1Interval = 10000; +kCheckpoint2Interval = 10000; + +# F1 evaluations are done in batches of 2^kBatchSizes +kBatchSizes = 8; + +# EPP for the final file, the higher this is, the less variability, and lower delta +# Note: if this is increased, ParkVector size must increase +kEntriesPerPark = 2048; + +# To store deltas for EPP entries, the average delta must be less than this number of bits +kMaxAverageDeltaTable1 = 5.6; +kMaxAverageDelta = 3.5; + +# C3 entries contain deltas for f7 values, the max average size is the following +kC3BitsPerEntry = 2.4; + +# The number of bits in the stub is k minus this value +kStubMinusBits = 3; + +#end ported code + +# version = 1.0.2 +# https://github.com/Chia-Network/chiapos/blob/1.0.2/LICENSE +# https://github.com/Chia-Network/chiapos/blob/1.0.2/src/util.hpp +# start ported code +def ByteAlign(num_bits: float) -> float: + return (num_bits + (8 - ((num_bits) % 8)) % 8) +# end ported code + +# version = 1.0.2 +# https://github.com/Chia-Network/chiapos/blob/1.0.2/LICENSE +# https://github.com/Chia-Network/chiapos/blob/1.0.2/src/entry_sizes.hpp +# start ported code +def CalculateLinePointSize(k: int) -> float: + return ByteAlign(2 * k) / 8 + +# This is the full size of the deltas section in a park. However, it will not be fully filled +def CalculateMaxDeltasSize(k: int, table_index: int) -> float: + if (table_index == 1): + return ByteAlign((kEntriesPerPark - 1) * kMaxAverageDeltaTable1) / 8 + + return ByteAlign((kEntriesPerPark - 1) * kMaxAverageDelta) / 8 + +def CalculateStubsSize(k: int) -> float: + return ByteAlign((kEntriesPerPark - 1) * (k - kStubMinusBits)) / 8 + +def CalculateParkSize(k: int, table_index: int) -> float: + return CalculateLinePointSize(k) + CalculateStubsSize(k) + CalculateMaxDeltasSize(k, table_index); + +# end ported code diff --git a/src/plotman/configuration.py b/src/plotman/configuration.py new file mode 100644 index 00000000..3181b5a9 --- /dev/null +++ b/src/plotman/configuration.py @@ -0,0 +1,322 @@ +import contextlib +import importlib +import os +import stat +import tempfile +import textwrap +from typing import Dict, Generator, List, Mapping, Optional + +import appdirs +import attr +import desert +# TODO: should be a desert.ib() but mypy doesn't understand it then, see below +import desert._make +import marshmallow +import pendulum +import yaml + +from plotman import resources as plotman_resources + + +class ConfigurationException(Exception): + """Raised when plotman.yaml configuration is missing or malformed.""" + + +def get_path() -> str: + """Return path to where plotman.yaml configuration file should exist.""" + config_dir: str = appdirs.user_config_dir("plotman") + return config_dir + "/plotman.yaml" + + +def read_configuration_text(config_path: str) -> str: + try: + with open(config_path, "r") as file: + return file.read() + except FileNotFoundError as e: + raise ConfigurationException( + f"No 'plotman.yaml' file exists at expected location: '{config_path}'. 
To generate " + f"default config file, run: 'plotman config generate'" + ) from e + + +def get_validated_configs(config_text: str, config_path: str, preset_target_definitions_text: str) -> "PlotmanConfig": + """Return a validated instance of PlotmanConfig with data from plotman.yaml + + :raises ConfigurationException: Raised when plotman.yaml is either missing or malformed + """ + schema = desert.schema(PlotmanConfig) + config_objects = yaml.load(config_text, Loader=yaml.SafeLoader) + + version = config_objects.get('version', (0,)) + + expected_major_version = 1 + + if version[0] != expected_major_version: + message = textwrap.dedent(f"""\ + Expected major version {expected_major_version}, found version {version} + See https://github.com/ericaltendorf/plotman/wiki/Configuration#versions + """) + + raise Exception(message) + + loaded: PlotmanConfig + try: + loaded = schema.load(config_objects) + except marshmallow.exceptions.ValidationError as e: + raise ConfigurationException( + f"Config file at: '{config_path}' is malformed" + ) from e + + if loaded.archiving is not None: + preset_target_objects = yaml.safe_load(preset_target_definitions_text) + preset_target_schema = desert.schema(PresetTargetDefinitions) + preset_target_definitions = preset_target_schema.load(preset_target_objects) + + loaded.archiving.target_definitions = { + **preset_target_definitions.target_definitions, + **loaded.archiving.target_definitions, + } + + return loaded + +class CustomStringField(marshmallow.fields.String): + def _deserialize(self, value: object, attr: Optional[str], data: Optional[Mapping[str, object]], **kwargs: Dict[str, object]) -> str: + if isinstance(value, int): + value = str(value) + + return super()._deserialize(value, attr, data, **kwargs) # type: ignore[no-any-return] + +# Data models used to deserializing/formatting plotman.yaml files. + +# TODO: bah, mutable? bah. +@attr.mutable +class ArchivingTarget: + transfer_process_name: str + transfer_process_argument_prefix: str + # TODO: mutable attribute... + # TODO: should be a desert.ib() but mypy doesn't understand it then + env: Dict[str, Optional[str]] = attr.ib( + factory=dict, + metadata={ + desert._make._DESERT_SENTINEL: { + 'marshmallow_field': marshmallow.fields.Dict( + keys=marshmallow.fields.String(), + values=CustomStringField(allow_none=True), + ) + }, + }, + ) + disk_space_path: Optional[str] = None + disk_space_script: Optional[str] = None + transfer_path: Optional[str] = None + transfer_script: Optional[str] = None + +@attr.frozen +class PresetTargetDefinitions: + target_definitions: Dict[str, ArchivingTarget] = attr.ib(factory=dict) + +# TODO: bah, mutable? bah. +@attr.mutable +class Archiving: + target: str + # TODO: mutable attribute... 
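+ # The custom marshmallow field wired in below lets plotman.yaml authors
+ # write env values that YAML parses as ints; CustomStringField (defined
+ # above) coerces them back to str. An illustrative config fragment (the
+ # key name here is hypothetical):
+ #
+ #   archiving:
+ #     env:
+ #       rsync_port: 873   # int in YAML, deserialized as "873"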
+ # TODO: should be a desert.ib() but mypy doesn't understand it then + env: Dict[str, str] = attr.ib( + factory=dict, + metadata={ + desert._make._DESERT_SENTINEL: { + 'marshmallow_field': marshmallow.fields.Dict( + keys=marshmallow.fields.String(), + values=CustomStringField(), + ) + }, + }, + ) + index: int = 0 # If not explicit, "index" will default to 0 + target_definitions: Dict[str, ArchivingTarget] = attr.ib(factory=dict) + + def target_definition(self) -> ArchivingTarget: + return self.target_definitions[self.target] + + def environment( + self, + source: Optional[str] = None, + destination: Optional[str] = None, + ) -> Dict[str, str]: + target = self.target_definition() + maybe_complete = {**target.env, **self.env} + + complete = { + key: value + for key, value in maybe_complete.items() + if value is not None + } + + if len(complete) != len(maybe_complete): + missing_mandatory_keys = sorted(maybe_complete.keys() - complete.keys()) + target_repr = repr(self.target) + missing = ', '.join(repr(key) for key in missing_mandatory_keys) + message = f'Missing env options for archival target {target_repr}: {missing}' + raise Exception(message) + + variables = {**os.environ, **complete} + complete['process_name'] = target.transfer_process_name.format(**variables) + + if source is not None: + complete['source'] = source + + if destination is not None: + complete['destination'] = destination + + return complete + + def maybe_create_scripts(self, temp: str) -> None: + rwx = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR + target = self.target_definition() + + if target.disk_space_path is None: + if target.disk_space_script is None: + raise Exception(f"One of `disk_space_path` or `disk_space_script` must be specified. Using target {self.target!r}") + + with tempfile.NamedTemporaryFile( + mode='w', + encoding='utf-8', + prefix='plotman-disk-space-script', + delete=False, + dir=temp, + ) as disk_space_script_file: + disk_space_script_file.write(target.disk_space_script) + + target.disk_space_path = disk_space_script_file.name + os.chmod(target.disk_space_path, rwx) + + if target.transfer_path is None: + if target.transfer_script is None: + raise Exception(f"One of `transfer_path` or `transfer_script` must be specified. 
Using target {self.target!r}") + + with tempfile.NamedTemporaryFile( + mode='w', + encoding='utf-8', + prefix='plotman-transfer-script', + delete=False, + dir=temp, + ) as transfer_script_file: + transfer_script_file.write(target.transfer_script) + + target.transfer_path = transfer_script_file.name + os.chmod(target.transfer_path, rwx) + +@attr.frozen +class TmpOverrides: + tmpdir_max_jobs: Optional[int] = None + +@attr.frozen +class Logging: + plots: str = os.path.join(appdirs.user_data_dir("plotman"), 'plots') + transfers: str = os.path.join(appdirs.user_data_dir("plotman"), 'transfers') + application: str = os.path.join(appdirs.user_log_dir("plotman"), 'plotman.log') + + def setup(self) -> None: + os.makedirs(self.plots, exist_ok=True) + os.makedirs(self.transfers, exist_ok=True) + os.makedirs(os.path.dirname(self.application), exist_ok=True) + + def create_plot_log_path(self, time: pendulum.DateTime) -> str: + return self._create_log_path( + time=time, + directory=self.plots, + group='plot', + ) + + def create_transfer_log_path(self, time: pendulum.DateTime) -> str: + return self._create_log_path( + time=time, + directory=self.transfers, + group='transfer', + ) + + def _create_log_path(self, time: pendulum.DateTime, directory: str, group: str) -> str: + timestamp = time.isoformat(timespec='microseconds').replace(':', '_') + return os.path.join(directory, f'{timestamp}.{group}.log') + +@attr.frozen +class Directories: + tmp: List[str] + dst: Optional[List[str]] = None + tmp2: Optional[str] = None + tmp_overrides: Optional[Dict[str, TmpOverrides]] = None + + def dst_is_tmp(self) -> bool: + return self.dst is None and self.tmp2 is None + + def dst_is_tmp2(self) -> bool: + return self.dst is None and self.tmp2 is not None + + def get_dst_directories(self) -> List[str]: + """Returns either or . If + Directories.dst is None, Use Directories.tmp as dst directory. 
+ """ + if self.dst_is_tmp2(): + return [self.tmp2] # type: ignore[list-item] + elif self.dst_is_tmp(): + return self.tmp + + return self.dst # type: ignore[return-value] + +@attr.frozen +class Scheduling: + global_max_jobs: int + global_stagger_m: int + polling_time_s: int + tmpdir_max_jobs: int + tmpdir_stagger_phase_major: int + tmpdir_stagger_phase_minor: int + tmpdir_stagger_phase_limit: int = 1 # If not explicit, "tmpdir_stagger_phase_limit" will default to 1 + +@attr.frozen +class Plotting: + k: int + e: bool + n_threads: int + n_buckets: int + job_buffer: int + farmer_pk: Optional[str] = None + pool_pk: Optional[str] = None + pool_contract_address: Optional[str] = None + x: bool = False + +@attr.frozen +class UserInterface: + use_stty_size: bool = True + +@attr.frozen +class Interactive: + autostart_plotting: bool = True + autostart_archiving: bool = True + +@attr.frozen +class Commands: + interactive: Interactive = attr.ib(factory=Interactive) + +@attr.frozen +class PlotmanConfig: + directories: Directories + scheduling: Scheduling + plotting: Plotting + commands: Commands = attr.ib(factory=Commands) + logging: Logging = Logging() + archiving: Optional[Archiving] = None + user_interface: UserInterface = attr.ib(factory=UserInterface) + version: List[int] = [0] + + @contextlib.contextmanager + def setup(self) -> Generator[None, None, None]: + prefix = f'plotman-pid_{os.getpid()}-' + + self.logging.setup() + + with tempfile.TemporaryDirectory(prefix=prefix) as temp: + if self.archiving is not None: + self.archiving.maybe_create_scripts(temp=temp) + + yield diff --git a/src/plotman/csv_exporter.py b/src/plotman/csv_exporter.py new file mode 100644 index 00000000..4cb679e0 --- /dev/null +++ b/src/plotman/csv_exporter.py @@ -0,0 +1,134 @@ +import csv +import sys +import typing + +import attr +import attr._make +import pendulum + +from plotman.log_parser import PlotLogParser +import plotman.plotinfo + + +@attr.frozen +class Row: + plot_id: str = attr.ib(converter=str, metadata={'name': 'Plot ID'}) + started_at: str = attr.ib(converter=str, metadata={'name': 'Started at'}) + date: str = attr.ib(converter=str, metadata={'name': 'Date'}) + size: str = attr.ib(converter=str, metadata={'name': 'Size'}) + buffer: str = attr.ib(converter=str, metadata={'name': 'Buffer'}) + buckets: str = attr.ib(converter=str, metadata={'name': 'Buckets'}) + threads: str = attr.ib(converter=str, metadata={'name': 'Threads'}) + tmp_dir_1: str = attr.ib(converter=str, metadata={'name': 'Tmp dir 1'}) + tmp_dir_2: str = attr.ib(converter=str, metadata={'name': 'Tmp dir 2'}) + phase_1_duration_raw: str = attr.ib(converter=str, metadata={'name': 'Phase 1 duration (raw)'}) + phase_1_duration: str = attr.ib(converter=str, metadata={'name': 'Phase 1 duration'}) + phase_1_duration_minutes: str = attr.ib(converter=str, metadata={'name': 'Phase 1 duration (minutes)'}) + phase_1_duration_hours: str = attr.ib(converter=str, metadata={'name': 'Phase 1 duration (hours)'}) + phase_2_duration_raw: str = attr.ib(converter=str, metadata={'name': 'Phase 2 duration (raw)'}) + phase_2_duration: str = attr.ib(converter=str, metadata={'name': 'Phase 2 duration'}) + phase_2_duration_minutes: str = attr.ib(converter=str, metadata={'name': 'Phase 2 duration (minutes)'}) + phase_2_duration_hours: str = attr.ib(converter=str, metadata={'name': 'Phase 2 duration (hours)'}) + phase_3_duration_raw: str = attr.ib(converter=str, metadata={'name': 'Phase 3 duration (raw)'}) + phase_3_duration: str = attr.ib(converter=str, metadata={'name': 
'Phase 3 duration'}) + phase_3_duration_minutes: str = attr.ib(converter=str, metadata={'name': 'Phase 3 duration (minutes)'}) + phase_3_duration_hours: str = attr.ib(converter=str, metadata={'name': 'Phase 3 duration (hours)'}) + phase_4_duration_raw: str = attr.ib(converter=str, metadata={'name': 'Phase 4 duration (raw)'}) + phase_4_duration: str = attr.ib(converter=str, metadata={'name': 'Phase 4 duration'}) + phase_4_duration_minutes: str = attr.ib(converter=str, metadata={'name': 'Phase 4 duration (minutes)'}) + phase_4_duration_hours: str = attr.ib(converter=str, metadata={'name': 'Phase 4 duration (hours)'}) + total_time_raw: str = attr.ib(converter=str, metadata={'name': 'Total time (raw)'}) + total_time: str = attr.ib(converter=str, metadata={'name': 'Total time'}) + total_time_minutes: str = attr.ib(converter=str, metadata={'name': 'Total time (minutes)'}) + total_time_hours: str = attr.ib(converter=str, metadata={'name': 'Total time (hours)'}) + copy_time_raw: str = attr.ib(converter=str, metadata={'name': 'Copy time (raw)'}) + copy_time: str = attr.ib(converter=str, metadata={'name': 'Copy time'}) + copy_time_minutes: str = attr.ib(converter=str, metadata={'name': 'Copy time (minutes)'}) + copy_time_hours: str = attr.ib(converter=str, metadata={'name': 'Copy time (hours)'}) + filename: str = attr.ib(converter=str, metadata={'name': 'Filename'}) + + @classmethod + def names(cls) -> typing.List[str]: + return [field.metadata['name'] for field in attr.fields(cls)] + + @classmethod + def from_info(cls, info: plotman.plotinfo.PlotInfo) -> "Row": + if info.started_at is None: + raise Exception(f'Unexpected None start time for file: {info.filename}') + + return cls( + plot_id=info.plot_id, + started_at=info.started_at.isoformat(), + date=info.started_at.date().isoformat(), # type: ignore[no-untyped-call] + size=info.plot_size, + buffer=info.buffer, + buckets=info.buckets, + threads=info.threads, + tmp_dir_1=info.tmp_dir1, + tmp_dir_2=info.tmp_dir2, + phase_1_duration_raw=info.phase1_duration_raw, + phase_1_duration=info.phase1_duration, + phase_1_duration_minutes=info.phase1_duration_minutes, + phase_1_duration_hours=info.phase1_duration_hours, + phase_2_duration_raw=info.phase2_duration_raw, + phase_2_duration=info.phase2_duration, + phase_2_duration_minutes=info.phase2_duration_minutes, + phase_2_duration_hours=info.phase2_duration_hours, + phase_3_duration_raw=info.phase3_duration_raw, + phase_3_duration=info.phase3_duration, + phase_3_duration_minutes=info.phase3_duration_minutes, + phase_3_duration_hours=info.phase3_duration_hours, + phase_4_duration_raw=info.phase4_duration_raw, + phase_4_duration=info.phase4_duration, + phase_4_duration_minutes=info.phase4_duration_minutes, + phase_4_duration_hours=info.phase4_duration_hours, + total_time_raw=info.total_time_raw, + total_time=info.total_time, + total_time_minutes=info.total_time_minutes, + total_time_hours=info.total_time_hours, + copy_time_raw=info.copy_time_raw, + copy_time=info.copy_time, + copy_time_minutes=info.copy_time_minutes, + copy_time_hours=info.copy_time_hours, + filename=info.filename, + ) + + def name_dict(self) -> typing.Dict[str, object]: + return { + field.metadata['name']: value + for field, value in zip(attr.fields(type(self)), attr.astuple(self)) + } + + +def key_on_plot_info_started_at(element: plotman.plotinfo.PlotInfo) -> pendulum.DateTime: + if element.started_at is None: + return pendulum.now().add(years=9999) + + return element.started_at + + +def parse_logs(logfilenames: typing.Sequence[str]) -> 
typing.List[plotman.plotinfo.PlotInfo]: + parser = PlotLogParser() + result = [] + + for filename in logfilenames: + with open(filename) as file: + info = parser.parse(file) + + if not info.in_progress(): + result.append(info) + + result.sort(key=key_on_plot_info_started_at) + + return result + + +def generate(logfilenames: typing.List[str], file: typing.TextIO) -> None: + writer = csv.DictWriter(file, fieldnames=Row.names()) + writer.writeheader() + + logs = parse_logs(logfilenames) + + for info in logs: + row = Row.from_info(info=info) + writer.writerow(rowdict=row.name_dict()) diff --git a/src/plotman/interactive.py b/src/plotman/interactive.py new file mode 100644 index 00000000..82ea89a8 --- /dev/null +++ b/src/plotman/interactive.py @@ -0,0 +1,361 @@ +import curses +import datetime +import locale +import math +import os +import subprocess +import sys +import typing + +from plotman import archive, configuration, manager, reporting +from plotman.job import Job + + +class TerminalTooSmallError(Exception): + pass + +class Log: + entries: typing.List[str] + cur_pos: int + + def __init__(self) -> None: + self.entries = [] + self.cur_pos = 0 + + # TODO: store timestamp as actual timestamp indexing the messages + def log(self, msg: str) -> None: + '''Log the message and scroll to the end of the log''' + ts = datetime.datetime.now().strftime('%m-%d %H:%M:%S') + self.entries.append(ts + ' ' + msg) + self.cur_pos = len(self.entries) + + def tail(self, num_entries: int) -> typing.List[str]: + '''Return the entries at the end of the log. Consider cur_slice() instead.''' + return self.entries[-num_entries:] + + def shift_slice(self, offset: int) -> None: + '''Positive shifts towards end, negative shifts towards beginning''' + self.cur_pos = max(0, min(len(self.entries), self.cur_pos + offset)) + + def shift_slice_to_end(self) -> None: + self.cur_pos = len(self.entries) + + def get_cur_pos(self) -> int: + return self.cur_pos + + def cur_slice(self, num_entries: int) -> typing.List[str]: + '''Return num_entries log entries up to the current slice position''' + return self.entries[max(0, self.cur_pos - num_entries) : self.cur_pos] + + def fill_log(self) -> None: + '''Add a bunch of stuff to the log. Useful for testing.''' + for i in range(100): + self.log('Log line %d' % i) + +def plotting_status_msg(active: bool, status: str) -> str: + if active: + return '(active) ' + status + else: + return '(inactive) ' + status + +def archiving_status_msg(configured: bool, active: bool, status: str) -> str: + if configured: + if active: + return '(active) ' + status + else: + return '(inactive) ' + status + else: + return '(not configured)' + +# cmd_autostart_plotting is the (optional) argument passed from the command line. May be None +def curses_main(stdscr: typing.Any, cmd_autostart_plotting: typing.Optional[bool], cmd_autostart_archiving: typing.Optional[bool], cfg: configuration.PlotmanConfig) -> None: + log = Log() + + if cmd_autostart_plotting is not None: + plotting_active = cmd_autostart_plotting + else: + plotting_active = cfg.commands.interactive.autostart_plotting + + archiving_configured = cfg.archiving is not None + + if not archiving_configured: + archiving_active = False + elif cmd_autostart_archiving is not None: + archiving_active = cmd_autostart_archiving + else: + archiving_active = cfg.commands.interactive.autostart_archiving + + plotting_status = '' # todo rename these msg? 
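+ # archiving_status is typed loosely on purpose: spawn_archive_process()
+ # may return a 'pid: ...' summary string, False, or a plain status
+ # message, depending on how far archiving got on a given pass.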
+ archiving_status: typing.Union[bool, str, typing.Dict[str, object]] = '' + + stdscr.nodelay(True) # make getch() non-blocking + stdscr.timeout(2000) + + # Create windows. We'll size them in the main loop when we have their content. + header_win = curses.newwin(1, 1, 1, 0) + log_win = curses.newwin(1, 1, 1, 0) + jobs_win = curses.newwin(1, 1, 1, 0) + dirs_win = curses.newwin(1, 1, 1, 0) + + jobs = Job.get_running_jobs(cfg.logging.plots) + last_refresh = None + + pressed_key = '' # For debugging + + archdir_freebytes = None + aging_reason = None + + while True: + + # A full refresh scans for and reads info for running jobs from + # scratch (i.e., reread their logfiles). Otherwise we'll only + # initialize new jobs, and mostly rely on cached info. + do_full_refresh = False + elapsed = 0 # Time since last refresh, or zero if no prev. refresh + if last_refresh is None: + do_full_refresh = True + else: + elapsed = (datetime.datetime.now() - last_refresh).total_seconds() + do_full_refresh = elapsed >= cfg.scheduling.polling_time_s + + if not do_full_refresh: + jobs = Job.get_running_jobs(cfg.logging.plots, cached_jobs=jobs) + + else: + last_refresh = datetime.datetime.now() + jobs = Job.get_running_jobs(cfg.logging.plots) + + if plotting_active: + (started, msg) = manager.maybe_start_new_plot( + cfg.directories, cfg.scheduling, cfg.plotting, cfg.logging + ) + if (started): + if aging_reason is not None: + log.log(aging_reason) + aging_reason = None + log.log(msg) + plotting_status = '' + jobs = Job.get_running_jobs(cfg.logging.plots, cached_jobs=jobs) + else: + # If a plot is delayed for any reason other than stagger, log it + if msg.find("stagger") < 0: + aging_reason = msg + plotting_status = msg + + if cfg.archiving is not None: + if archiving_active: + archiving_status, log_messages = archive.spawn_archive_process(cfg.directories, cfg.archiving, cfg.logging, jobs) + for log_message in log_messages: + log.log(log_message) + + archdir_freebytes, log_messages = archive.get_archdir_freebytes(cfg.archiving) + for log_message in log_messages: + log.log(log_message) + + + # Get terminal size. Recommended method is stdscr.getmaxyx(), but this + # does not seem to work on some systems. It may be a bug in Python + # curses, maybe having to do with registering sigwinch handlers in + # multithreaded environments. See e.g. + # https://stackoverflow.com/questions/33906183#33906270 + # Alternative option is to call out to `stty size`. For now, we + # support both strategies, selected by a config option. + # TODO: also try shutil.get_terminal_size() + n_rows: int + n_cols: int + if cfg.user_interface.use_stty_size: + completed_process = subprocess.run( + ['stty', 'size'], check=True, encoding='utf-8', stdout=subprocess.PIPE + ) + elements = completed_process.stdout.split() + (n_rows, n_cols) = [int(v) for v in elements] + else: + (n_rows, n_cols) = map(int, stdscr.getmaxyx()) + + stdscr.clear() + stdscr.resize(n_rows, n_cols) + curses.resize_term(n_rows, n_cols) + + # + # Obtain and measure content + # + + # Directory prefixes, for abbreviation + tmp_prefix = os.path.commonpath(cfg.directories.tmp) + dst_dir = cfg.directories.get_dst_directories() + dst_prefix = os.path.commonpath(dst_dir) + if archdir_freebytes is not None: + archive_directories = list(archdir_freebytes.keys()) + if len(archive_directories) == 0: + arch_prefix = '' + else: + arch_prefix = os.path.commonpath(archive_directories) + + n_tmpdirs = len(cfg.directories.tmp) + + # Directory reports. 
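+ # (The prefixes computed above with os.path.commonpath keep these
+ # reports narrow: e.g. os.path.commonpath(['/farm/yards/901',
+ # '/farm/yards/902']) == '/farm/yards', so rows need only show the
+ # unique suffix of each directory.)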
+ tmp_report = reporting.tmp_dir_report( + jobs, cfg.directories, cfg.scheduling, n_cols, 0, n_tmpdirs, tmp_prefix) + dst_report = reporting.dst_dir_report( + jobs, dst_dir, n_cols, dst_prefix) + if archdir_freebytes is not None: + arch_report = reporting.arch_dir_report(archdir_freebytes, n_cols, arch_prefix) + if not arch_report: + arch_report = '' + else: + arch_report = '' + + # + # Layout + # + + tmp_h = len(tmp_report.splitlines()) + tmp_w = len(max(tmp_report.splitlines(), key=len)) + 1 + dst_h = len(dst_report.splitlines()) + dst_w = len(max(dst_report.splitlines(), key=len)) + 1 + arch_h = len(arch_report.splitlines()) + 1 + arch_w = n_cols + + header_h = 3 + dirs_h = max(tmp_h, dst_h) + arch_h + remainder = n_rows - (header_h + dirs_h) + jobs_h = max(5, math.floor(remainder * 0.6)) + logs_h = n_rows - (header_h + jobs_h + dirs_h) + + header_pos = 0 + jobs_pos = header_pos + header_h + stdscr.resize(n_rows, n_cols) + dirs_pos = jobs_pos + jobs_h + logscreen_pos = dirs_pos + dirs_h + + linecap = n_cols - 1 + logs_h = n_rows - (header_h + jobs_h + dirs_h) + + try: + header_win = curses.newwin(header_h, n_cols, header_pos, 0) + log_win = curses.newwin(logs_h, n_cols, logscreen_pos, 0) + jobs_win = curses.newwin(jobs_h, n_cols, jobs_pos, 0) + dirs_win = curses.newwin(dirs_h, n_cols, dirs_pos, 0) + except Exception: + raise Exception('Failed to initialize curses windows, try a larger ' + 'terminal window.') + + # + # Write + # + + # Header + header_win.addnstr(0, 0, 'Plotman', linecap, curses.A_BOLD) + timestamp = datetime.datetime.now().strftime("%H:%M:%S") + refresh_msg = "now" if do_full_refresh else f"{int(elapsed)}s/{cfg.scheduling.polling_time_s}" + header_win.addnstr(f" {timestamp} (refresh {refresh_msg})", linecap) + header_win.addnstr(' |
P
lotting: ', linecap, curses.A_BOLD) + header_win.addnstr( + plotting_status_msg(plotting_active, plotting_status), linecap) + header_win.addnstr(' rchival: ', linecap, curses.A_BOLD) + header_win.addnstr( + archiving_status_msg(archiving_configured, + archiving_active, archiving_status), linecap) # type: ignore[arg-type] + + # Oneliner progress display + header_win.addnstr(1, 0, 'Jobs (%d): ' % len(jobs), linecap) + header_win.addnstr('[' + reporting.job_viz(jobs) + ']', linecap) + + # These are useful for debugging. + # header_win.addnstr(' term size: (%d, %d)' % (n_rows, n_cols), linecap) # Debuggin + # if pressed_key: + # header_win.addnstr(' (keypress %s)' % str(pressed_key), linecap) + header_win.addnstr(2, 0, 'Prefixes:', linecap, curses.A_BOLD) + header_win.addnstr(' tmp=', linecap, curses.A_BOLD) + header_win.addnstr(tmp_prefix, linecap) + header_win.addnstr(' dst=', linecap, curses.A_BOLD) + header_win.addnstr(dst_prefix, linecap) + if archiving_configured: + header_win.addnstr(' archive=', linecap, curses.A_BOLD) + header_win.addnstr(arch_prefix, linecap) + header_win.addnstr(' (remote)', linecap) + + + # Jobs + jobs_win.addstr(0, 0, reporting.status_report(jobs, n_cols, jobs_h, + tmp_prefix, dst_prefix)) + jobs_win.chgat(0, 0, curses.A_REVERSE) + + # Dirs + tmpwin_dstwin_gutter = 6 + + maxtd_h = max([tmp_h, dst_h]) + + tmpwin = curses.newwin( + tmp_h, tmp_w, + dirs_pos + int(maxtd_h - tmp_h), 0) + tmpwin.addstr(tmp_report) + tmpwin.chgat(0, 0, curses.A_REVERSE) + + dstwin = curses.newwin( + dst_h, dst_w, + dirs_pos + int((maxtd_h - dst_h) / 2), tmp_w + tmpwin_dstwin_gutter) + dstwin.addstr(dst_report) + dstwin.chgat(0, 0, curses.A_REVERSE) + + archwin = curses.newwin(arch_h, arch_w, dirs_pos + maxtd_h, 0) + archwin.addstr(0, 0, 'Archive dirs free space', curses.A_REVERSE) + archwin.addstr(1, 0, arch_report) + + # Log. Could use a pad here instead of managing scrolling ourselves, but + # this seems easier. + log_win.addnstr(0, 0, ('Log: %d (// to scroll)\n' % log.get_cur_pos() ), + linecap, curses.A_REVERSE) + for i, logline in enumerate(log.cur_slice(logs_h - 1)): + log_win.addnstr(i + 1, 0, logline, linecap) + + stdscr.noutrefresh() + header_win.noutrefresh() + jobs_win.noutrefresh() + tmpwin.noutrefresh() + dstwin.noutrefresh() + archwin.noutrefresh() + log_win.noutrefresh() + curses.doupdate() + + try: + key = stdscr.getch() + except KeyboardInterrupt: + key = ord('q') + + if key == curses.KEY_UP: + log.shift_slice(-1) + pressed_key = 'up' + elif key == curses.KEY_DOWN: + log.shift_slice(1) + pressed_key = 'dwn' + elif key == curses.KEY_END: + log.shift_slice_to_end() + pressed_key = 'end' + elif key == ord('p'): + plotting_active = not plotting_active + pressed_key = 'p' + elif key == ord('a'): + archiving_active = not archiving_active + pressed_key = 'a' + elif key == ord('q'): + break + else: + pressed_key = key + +def run_interactive(cfg: configuration.PlotmanConfig, autostart_plotting: typing.Optional[bool] = None, autostart_archiving: typing.Optional[bool] = None) -> None: + locale.setlocale(locale.LC_ALL, '') + code = locale.getpreferredencoding() + # Then use code as the encoding for str.encode() calls. 
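+ # curses.wrapper() takes care of terminal setup and teardown around
+ # curses_main: it calls initscr(), enables cbreak/keypad modes, and
+ # restores the terminal even when curses_main raises, so the handler
+ # below only has to translate the exception into friendlier advice.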
+ + try: + curses.wrapper( + curses_main, + cmd_autostart_plotting=autostart_plotting, + cmd_autostart_archiving=autostart_archiving, + cfg=cfg, + ) + except curses.error as e: + raise TerminalTooSmallError( + "Your terminal may be too small, try making it bigger.", + ) from e diff --git a/src/plotman/job.py b/src/plotman/job.py new file mode 100644 index 00000000..479b81f0 --- /dev/null +++ b/src/plotman/job.py @@ -0,0 +1,517 @@ +# TODO do we use all these? +import argparse +import contextlib +import functools +import logging +import os +import random +import re +import sys +import time +from datetime import datetime +from enum import Enum, auto +from subprocess import call +import typing + +import attr +import click +import pendulum +import psutil + +from plotman import chia + + +def job_phases_for_tmpdir(d: str, all_jobs: typing.List["Job"]) -> typing.List["Phase"]: + '''Return phase 2-tuples for jobs running on tmpdir d''' + return sorted([j.progress() for j in all_jobs if j.tmpdir == d]) + +def job_phases_for_dstdir(d: str, all_jobs: typing.List["Job"]) -> typing.List["Phase"]: + '''Return phase 2-tuples for jobs outputting to dstdir d''' + return sorted([j.progress() for j in all_jobs if j.dstdir == d]) + +def is_plotting_cmdline(cmdline: typing.List[str]) -> bool: + if cmdline and 'python' in cmdline[0].lower(): + cmdline = cmdline[1:] + return ( + len(cmdline) >= 3 + and 'chia' in cmdline[0] + and 'plots' == cmdline[1] + and 'create' == cmdline[2] + ) + +def parse_chia_plot_time(s: str) -> pendulum.DateTime: + # This will grow to try ISO8601 as well for when Chia logs that way + # TODO: unignore once fixed upstream + # https://github.com/sdispater/pendulum/pull/548 + return pendulum.from_format(s, 'ddd MMM DD HH:mm:ss YYYY', locale='en', tz=None) # type: ignore[arg-type] + +def parse_chia_plots_create_command_line( + command_line: typing.List[str], +) -> "ParsedChiaPlotsCreateCommand": + command_line = list(command_line) + # Parse command line args + if 'python' in command_line[0].lower(): + command_line = command_line[1:] + assert len(command_line) >= 3 + assert 'chia' in command_line[0] + assert 'plots' == command_line[1] + assert 'create' == command_line[2] + + all_command_arguments = command_line[3:] + + # nice idea, but this doesn't include -h + # help_option_names = command.get_help_option_names(ctx=context) + help_option_names = {'--help', '-h'} + + command_arguments = [ + argument + for argument in all_command_arguments + if argument not in help_option_names + ] + + # TODO: We could at some point do chia version detection and pick the + # associated command. For now we'll just use the latest one we have + # copied. 
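+ # latest_command() just takes max() over the registered version keys,
+ # relying on tuple ordering: (1, 1, 7) > (1, 1, 2), so the newest
+ # copied CLI definition is the one used for parsing.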
+ command = chia.commands.latest_command() + try: + context = command.make_context(info_name='', args=list(command_arguments)) + except click.ClickException as e: + error = e + params = {} + else: + error = None + params = context.params + + return ParsedChiaPlotsCreateCommand( + error=error, + help=len(all_command_arguments) > len(command_arguments), + parameters=params, + ) + +class ParsedChiaPlotsCreateCommand: + def __init__( + self, + error: click.ClickException, + help: bool, + parameters: typing.Dict[str, object], + ) -> None: + self.error = error + self.help = help + self.parameters = parameters + +@functools.total_ordering +@attr.frozen(order=False) +class Phase: + major: int = 0 + minor: int = 0 + known: bool = True + + def __lt__(self, other: "Phase") -> bool: + return ( + (not self.known, self.major, self.minor) + < (not other.known, other.major, other.minor) + ) + + @classmethod + def from_tuple(cls, t: typing.Tuple[typing.Optional[int], typing.Optional[int]]) -> "Phase": + if len(t) != 2: + raise Exception(f'phase must be created from 2-tuple: {t!r}') + + if None in t and not t[0] is t[1]: + raise Exception(f'phase can not be partially known: {t!r}') + + if t[0] is None: + return cls(known=False) + + return cls(major=t[0], minor=t[1]) # type: ignore[arg-type] + + @classmethod + def list_from_tuples( + cls, + l: typing.Sequence[typing.Tuple[typing.Optional[int], typing.Optional[int]]], + ) -> typing.List["Phase"]: + return [cls.from_tuple(t) for t in l] + + def __str__(self) -> str: + if not self.known: + return '?:?' + return f'{self.major}:{self.minor}' + +# TODO: be more principled and explicit about what we cache vs. what we look up +# dynamically from the logfile +class Job: + 'Represents a plotter job' + + logfile: str = '' + jobfile: str = '' + job_id: int = 0 + plot_id: str = '--------' + proc: psutil.Process + k: int + r: int + u: int + b: int + n: int + tmpdir: str + tmp2dir: str + dstdir: str + + @classmethod + def get_running_jobs( + cls, + logroot: str, + cached_jobs: typing.Sequence["Job"] = (), + ) -> typing.List["Job"]: + '''Return a list of running plot jobs. If a cache of preexisting jobs is provided, + reuse those previous jobs without updating their information. Always look for + new jobs not already in the cache.''' + jobs: typing.List[Job] = [] + cached_jobs_by_pid = { j.proc.pid: j for j in cached_jobs } + + with contextlib.ExitStack() as exit_stack: + processes = [] + + for process in psutil.process_iter(): + # Ignore processes which most likely have terminated between the time of + # iteration and data access. + with contextlib.suppress(psutil.NoSuchProcess, psutil.AccessDenied): + exit_stack.enter_context(process.oneshot()) + if is_plotting_cmdline(process.cmdline()): + processes.append(process) + + # https://github.com/ericaltendorf/plotman/pull/418 + # The experimental Chia GUI .deb installer launches plots + # in a manner that results in a parent and child process + # that both share the same command line and, as such, are + # both identified as plot processes. Only the child is + # really plotting. Filter out the parent. 
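A quick sanity sketch of the Phase ordering defined above (assuming this module is importable as plotman.job): known phases compare by (major, minor), and unknown phases deliberately sort after every known one, so an unparsed job never looks like the furthest-behind job.

from plotman.job import Phase

assert Phase(major=2, minor=1) < Phase(major=3, minor=0)  # ordinary ordering
assert Phase(major=4, minor=0) < Phase(known=False)       # unknown sorts last
assert str(Phase(known=False)) == '?:?'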
+
+            pids = {process.pid for process in processes}
+            ppids = {process.ppid() for process in processes}
+            wanted_pids = pids - ppids
+
+            wanted_processes = [
+                process
+                for process in processes
+                if process.pid in wanted_pids
+            ]
+
+            for proc in wanted_processes:
+                if proc.pid in cached_jobs_by_pid.keys():
+                    jobs.append(cached_jobs_by_pid[proc.pid])  # Copy from cache
+                else:
+                    with proc.oneshot():
+                        parsed_command = parse_chia_plots_create_command_line(
+                            command_line=proc.cmdline(),
+                        )
+                        if parsed_command.error is not None:
+                            continue
+                        job = Job(
+                            proc=proc,
+                            parsed_command=parsed_command,
+                            logroot=logroot,
+                        )
+                        if job.help:
+                            continue
+                        jobs.append(job)
+
+        return jobs
+
+
+    def __init__(
+        self,
+        proc: psutil.Process,
+        parsed_command: ParsedChiaPlotsCreateCommand,
+        logroot: str,
+    ) -> None:
+        '''Initialize from an existing psutil.Process object. Must know logroot
+        in order to understand open files.'''
+        self.proc = proc
+        # These are dynamic, cached, and need to be updated periodically
+        self.phase = Phase(known=False)
+
+        self.help = parsed_command.help
+        self.args = parsed_command.parameters
+
+        # an example as of 1.0.5
+        # {
+        #     'size': 32,
+        #     'num_threads': 4,
+        #     'buckets': 128,
+        #     'buffer': 6000,
+        #     'tmp_dir': '/farm/yards/901',
+        #     'final_dir': '/farm/wagons/801',
+        #     'override_k': False,
+        #     'num': 1,
+        #     'alt_fingerprint': None,
+        #     'pool_contract_address': None,
+        #     'farmer_public_key': None,
+        #     'pool_public_key': None,
+        #     'tmp2_dir': None,
+        #     'plotid': None,
+        #     'memo': None,
+        #     'nobitfield': False,
+        #     'exclude_final_dir': False,
+        # }
+
+        self.k = self.args['size']  # type: ignore[assignment]
+        self.r = self.args['num_threads']  # type: ignore[assignment]
+        self.u = self.args['buckets']  # type: ignore[assignment]
+        self.b = self.args['buffer']  # type: ignore[assignment]
+        self.n = self.args['num']  # type: ignore[assignment]
+        self.tmpdir = self.args['tmp_dir']  # type: ignore[assignment]
+        self.tmp2dir = self.args['tmp2_dir']  # type: ignore[assignment]
+        self.dstdir = self.args['final_dir']  # type: ignore[assignment]
+
+        plot_cwd: str = self.proc.cwd()
+        self.tmpdir = os.path.join(plot_cwd, self.tmpdir)
+        if self.tmp2dir is not None:
+            self.tmp2dir = os.path.join(plot_cwd, self.tmp2dir)
+        self.dstdir = os.path.join(plot_cwd, self.dstdir)
+
+        # Find logfile (whatever file is open under the log root). The
+        # file may be open more than once, e.g. for STDOUT and STDERR.
+        for f in self.proc.open_files():
+            if logroot in f.path:
+                if self.logfile:
+                    assert self.logfile == f.path
+                else:
+                    self.logfile = f.path
+                break
+
+        if self.logfile:
+            # Initialize data that needs to be loaded from the logfile
+            self.init_from_logfile()
+# TODO: turn this into logging or somesuch
+#        else:
+#            print('Found plotting process PID {pid}, but could not find '
+#                    'logfile in its open files:'.format(pid = self.proc.pid))
+#            for f in self.proc.open_files():
+#                print(f.path)
+
+
+
+    def init_from_logfile(self) -> None:
+        '''Read plot ID and job start time from the logfile; the fields are
+        left at their defaults if the information cannot be found.'''
+        assert self.logfile
+        # Try reading for a while; it can take a while for the job to get started as it scans
+        # existing plot dirs (especially if they are NFS).
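One detail worth calling out in __init__ above: the unconditional os.path.join(plot_cwd, ...) is safe because join discards its first argument whenever the second is already absolute, so only relative command-line paths get anchored to the process's working directory. For example (POSIX paths; the cwd value is illustrative):

import os

assert os.path.join('/proc/1234/cwd', '/mnt/tmp/00') == '/mnt/tmp/00'             # absolute wins
assert os.path.join('/proc/1234/cwd', 'plots/tmp') == '/proc/1234/cwd/plots/tmp'  # relative is anchored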
+        found_id = False
+        found_log = False
+        for attempt_number in range(3):
+            with open(self.logfile, 'r') as f:
+                with contextlib.suppress(UnicodeDecodeError):
+                    for line in f:
+                        m = re.match('^ID: ([0-9a-f]*)', line)
+                        if m:
+                            self.plot_id = m.group(1)
+                            found_id = True
+                        m = re.match(r'^Starting phase 1/4:.*\.\.\. (.*)', line)
+                        if m:
+                            # Mon Nov 2 08:39:53 2020
+                            self.start_time = parse_chia_plot_time(m.group(1))
+                            found_log = True
+                            break  # Stop reading lines in file
+
+            if found_id and found_log:
+                break  # Stop trying
+            else:
+                time.sleep(1)  # Sleep and try again
+
+        # If we couldn't find the line in the logfile, the job is probably just getting started
+        # (and being slow about it). In this case, use the last metadata change as the start time.
+        # TODO: we never come back to this; e.g. plot_id may remain uninitialized.
+        # TODO: should we just use the process start time instead?
+        if not found_log:
+            self.start_time = pendulum.from_timestamp(os.path.getctime(self.logfile))
+
+        # Load things from logfile that are dynamic
+        self.update_from_logfile()
+
+    def update_from_logfile(self) -> None:
+        self.set_phase_from_logfile()
+
+    def set_phase_from_logfile(self) -> None:
+        assert self.logfile
+
+        # Map from phase number to subphase number reached in that phase.
+        # Phase 1 subphases are <started>, table1, table2, ...
+        # Phase 2 subphases are <started>, table7, table6, ...
+        # Phase 3 subphases are <started>, tables1&2, tables2&3, ...
+        # Phase 4 subphases are <started>
+        phase_subphases = {}
+
+        with open(self.logfile, 'r') as f:
+            with contextlib.suppress(UnicodeDecodeError):
+                for line in f:
+                    # "Starting phase 1/4: Forward Propagation into tmp files... Sat Oct 31 11:27:04 2020"
+                    m = re.match(r'^Starting phase (\d).*', line)
+                    if m:
+                        phase = int(m.group(1))
+                        phase_subphases[phase] = 0
+
+                    # Phase 1: "Computing table 2"
+                    m = re.match(r'^Computing table (\d).*', line)
+                    if m:
+                        phase_subphases[1] = max(phase_subphases[1], int(m.group(1)))
+
+                    # Phase 2: "Backpropagating on table 2"
+                    m = re.match(r'^Backpropagating on table (\d).*', line)
+                    if m:
+                        phase_subphases[2] = max(phase_subphases[2], 7 - int(m.group(1)))
+
+                    # Phase 3: "Compressing tables 4 and 5"
+                    m = re.match(r'^Compressing tables (\d) and (\d).*', line)
+                    if m:
+                        phase_subphases[3] = max(phase_subphases[3], int(m.group(1)))
+
+                    # TODO also collect timing info:
+
+                    # "Time for phase 1 = 22796.7 seconds. CPU (98%) Tue Sep 29 17:57:19 2020"
+                    # for phase in ['1', '2', '3', '4']:
+                    #     m = re.match(r'^Time for phase ' + phase + ' = (\d+.\d+) seconds..*', line)
+                    #     data.setdefault....
+
+                    # Total time = 49487.1 seconds. CPU (97.26%) Wed Sep 30 01:22:10 2020
+                    # m = re.match(r'^Total time = (\d+.\d+) seconds.*', line)
+                    # if m:
+                    #     data.setdefault(key, {}).setdefault('total time', []).append(float(m.group(1)))
+
+        if phase_subphases:
+            phase = max(phase_subphases.keys())
+            self.phase = Phase(major=phase, minor=phase_subphases[phase])
+        else:
+            self.phase = Phase(major=0, minor=0)
+
+    def progress(self) -> Phase:
+        '''Return the job's phase:subphase progress (as cached from the logfile)'''
+        return self.phase
+
+    def plot_id_prefix(self) -> str:
+        return self.plot_id[:8]
+
+    # TODO: make this more useful and complete, and/or make it configurable
+    def status_str_long(self) -> str:
+        return '{plot_id}\nk={k} r={r} b={b} u={u}\npid:{pid}\ntmp:{tmp}\ntmp2:{tmp2}\ndst:{dst}\nlogfile:{logfile}'.format(
+            plot_id = self.plot_id,
+            k = self.k,
+            r = self.r,
+            b = self.b,
+            u = self.u,
+            pid = self.proc.pid,
+            tmp = self.tmpdir,
+            tmp2 = self.tmp2dir,
+            dst = self.dstdir,
+            logfile = self.logfile
+            )
+
+    def print_logs(self, follow: bool = False) -> None:
+        with open(self.logfile, 'r') as f:
+            if follow:
+                line = ''
+                while True:
+                    tmp = f.readline()
+                    if tmp:
+                        line += tmp
+                        if line.endswith("\n"):
+                            print(line.rstrip('\n'))
+                            line = ''
+                    else:
+                        # readline() returns '' (not None) at EOF, so only
+                        # sleep when there is genuinely no more output yet.
+                        time.sleep(0.1)
+            else:
+                print(f.read())
+
+    def to_dict(self) -> typing.Dict[str, object]:
+        '''Exports important information as dictionary.'''
+        return dict(
+            plot_id=self.plot_id[:8],
+            k=self.k,
+            tmp_dir=self.tmpdir,
+            dst_dir=self.dstdir,
+            progress=str(self.progress()),
+            tmp_usage=self.get_tmp_usage(),
+            pid=self.proc.pid,
+            run_status=self.get_run_status(),
+            mem_usage=self.get_mem_usage(),
+            time_wall=self.get_time_wall(),
+            time_user=self.get_time_user(),
+            time_sys=self.get_time_sys(),
+            time_iowait=self.get_time_iowait()
+        )
+
+
+    def get_mem_usage(self) -> int:
+        # Total, inc swapped
+        return self.proc.memory_info().vms  # type: ignore[no-any-return]
+
+    def get_tmp_usage(self) -> int:
+        total_bytes = 0
+        with contextlib.suppress(FileNotFoundError):
+            # The directory might not exist at this name, or at all, anymore
+            with os.scandir(self.tmpdir) as it:
+                for entry in it:
+                    if self.plot_id in entry.name:
+                        with contextlib.suppress(FileNotFoundError):
+                            # The file might disappear; this being an estimate we don't care
+                            total_bytes += entry.stat().st_size
+        return total_bytes
+
+    def get_run_status(self) -> str:
+        '''Running, suspended, etc.'''
+        status = self.proc.status()
+        if status == psutil.STATUS_RUNNING:
+            return 'RUN'
+        elif status == psutil.STATUS_SLEEPING:
+            return 'SLP'
+        elif status == psutil.STATUS_DISK_SLEEP:
+            return 'DSK'
+        elif status == psutil.STATUS_STOPPED:
+            return 'STP'
+        else:
+            return self.proc.status()  # type: ignore[no-any-return]
+
+    def get_time_wall(self) -> int:
+        create_time = datetime.fromtimestamp(self.proc.create_time())
+        return int((datetime.now() - create_time).total_seconds())
+
+    def get_time_user(self) -> int:
+        return int(self.proc.cpu_times().user)
+
+    def get_time_sys(self) -> int:
+        return int(self.proc.cpu_times().system)
+
+    def get_time_iowait(self) -> typing.Optional[int]:
+        cpu_times = self.proc.cpu_times()
+        iowait = getattr(cpu_times, 'iowait', None)
+        if iowait is None:
+            return None
+
+        return int(iowait)
+
+    def suspend(self, reason: str = '') -> None:
+        self.proc.suspend()
+        self.status_note = reason
+
+    def resume(self) -> None:
+        self.proc.resume()
+
+    def get_temp_files(self) -> typing.Set[str]:
+        # Prevent duplicate file paths by using set.
+        temp_files = set([])
+        for f in self.proc.open_files():
+            if any(
+                dir in f.path
+                for dir in [self.tmpdir, self.tmp2dir, self.dstdir]
+                if dir is not None
+            ):
+                temp_files.add(f.path)
+        return temp_files
+
+    def cancel(self) -> None:
+        'Cancel an already running job'
+        # We typically suspend the job as the first action in killing it, so it
+        # doesn't create more tmp files during death. However, terminate() won't
+        # complete if the job is suspended, so we also need to resume it.
+        # TODO: check that this is best practice for killing a job.
+        self.proc.resume()
+        self.proc.terminate()
diff --git a/src/plotman/log_parser.py b/src/plotman/log_parser.py
new file mode 100644
index 00000000..5c9eac19
--- /dev/null
+++ b/src/plotman/log_parser.py
@@ -0,0 +1,145 @@
+import os
+import re
+import typing
+
+from plotman.plotinfo import PlotInfo
+import plotman.job
+
+
+class PlotLogParser:
+    """Parser for a finished plotting job"""
+
+    def parse(self, file: typing.TextIO) -> PlotInfo:
+        """Parses a single log and returns its info"""
+        entry = PlotInfo()
+
+        matchers = [
+            self.ignore_line,
+            self.plot_id,
+            self.plot_start_date,
+            self.plot_size,
+            self.buffer_size,
+            self.buckets,
+            self.threads,
+            self.plot_dirs,
+            self.phase1_duration,
+            self.phase2_duration,
+            self.phase3_duration,
+            self.phase4_duration,
+            self.total_time,
+            self.copy_time,
+            self.filename
+        ]
+
+        for line in file:
+            for matcher in matchers:
+                if (matcher(line, entry)):
+                    break
+
+        return entry
+
+    # ID: 3eb8a37981de1cc76187a36ed947ab4307943cf92967a7e166841186c7899e24
+    def plot_id(self, line: str, entry: PlotInfo) -> bool:
+        m = re.search(r'^ID: (.+)$', line)
+        if m:
+            entry.plot_id = m.group(1)
+        return m != None
+
+    # Renamed final file from "/farm/wagons/801/abc.plot.2.tmp" to "/farm/wagons/801/abc.plot"
+    def filename(self, line: str, entry: PlotInfo) -> bool:
+        m = re.search(r'^Renamed final file from ".+" to "(.+)"', line)
+        if m:
+            entry.filename = m.group(1)
+        return m != None
+
+    # Time for phase 1 = 17571.981 seconds. CPU (178.600%) Sun Apr 4 23:53:42 2021
+    def phase1_duration(self, line: str, entry: PlotInfo) -> bool:
+        m = re.search(r"^Time for phase 1 = (\d+\.\d+) seconds", line)
+        if m:
+            entry.phase1_duration_raw = float(m.group(1))
+        return m != None
+
+    # Time for phase 2 = 6911.621 seconds. CPU (71.780%) Mon Apr 5 01:48:54 2021
+    def phase2_duration(self, line: str, entry: PlotInfo) -> bool:
+        m = re.search(r"^Time for phase 2 = (\d+\.\d+) seconds", line)
+        if m:
+            entry.phase2_duration_raw = float(m.group(1))
+        return m != None
+
+    # Time for phase 3 = 14537.188 seconds. CPU (82.730%) Mon Apr 5 05:51:11 2021
+    def phase3_duration(self, line: str, entry: PlotInfo) -> bool:
+        m = re.search(r"^Time for phase 3 = (\d+\.\d+) seconds", line)
+        if m:
+            entry.phase3_duration_raw = float(m.group(1))
+        return m != None
+
+    # Time for phase 4 = 924.288 seconds. CPU (86.810%) Mon Apr 5 06:06:35 2021
+    def phase4_duration(self, line: str, entry: PlotInfo) -> bool:
+        m = re.search(r"^Time for phase 4 = (\d+\.\d+) seconds", line)
+        if m:
+            entry.phase4_duration_raw = float(m.group(1))
+        return m != None
+
+    # Total time = 39945.080 seconds. CPU (123.100%) Mon Apr 5 06:06:35 2021
+    def total_time(self, line: str, entry: PlotInfo) -> bool:
+        m = re.search(r"^Total time = (\d+\.\d+) seconds", line)
+        if m:
+            entry.total_time_raw = float(m.group(1))
+        return m != None
+
+    # Copy time = 501.696 seconds.
CPU (23.860%) Sun May 9 22:52:41 2021 + def copy_time(self, line: str, entry: PlotInfo) -> bool: + m = re.search(r"^Copy time = (\d+\.\d+) seconds", line) + if m: + entry.copy_time_raw = float(m.group(1)) + return m != None + + # Starting plotting progress into temporary dirs: /farm/yards/901 and /farm/yards/901 + def plot_dirs(self, line: str, entry: PlotInfo) -> bool: + m = re.search(r"^Starting plotting progress into temporary dirs: (.+) and (.+)$", line) + if m: + entry.tmp_dir1 = m.group(1) + entry.tmp_dir2 = m.group(2) + return m != None + + # Using 4 threads of stripe size 65536 + def threads(self, line: str, entry: PlotInfo) -> bool: + m = re.search(r"^Using (\d+) threads of stripe size (\d+)", line) + if m: + entry.threads = int(m.group(1)) + return m != None + + # "^Using (\\d+) buckets" + def buckets(self, line: str, entry: PlotInfo) -> bool: + m = re.search(r"^Using (\d+) buckets", line) + if m: + entry.buckets = int(m.group(1)) + return m != None + + # Buffer size is: 4000MiB + def buffer_size(self, line: str, entry: PlotInfo) -> bool: + m = re.search(r"^Buffer size is: (\d+)MiB", line) + if m: + entry.buffer = int(m.group(1)) + return m != None + + # Plot size is: 32 + def plot_size(self, line: str, entry: PlotInfo) -> bool: + m = re.search(r'^Plot size is: (\d+)', line) + if m: + entry.plot_size = int(m.group(1)) + return m != None + + # Starting phase 1/4: Forward Propagation into tmp files... Sun May 9 17:36:12 2021 + def plot_start_date(self, line: str, entry: PlotInfo) -> bool: + m = re.search(r'^Starting phase 1/4: Forward Propagation into tmp files\.\.\. (.+)', line) + if m: + entry.started_at = plotman.job.parse_chia_plot_time(s=m.group(1)) + return m != None + + + # Ignore lines starting with Bucket + # Bucket 0 uniform sort. Ram: 3.250GiB, u_sort min: 0.563GiB, qs min: 0.281GiB. + def ignore_line(self, line: str, _: PlotInfo) -> bool: + m = re.search(r'^\tBucket', line) + return m != None \ No newline at end of file diff --git a/src/plotman/manager.py b/src/plotman/manager.py new file mode 100644 index 00000000..87be88f7 --- /dev/null +++ b/src/plotman/manager.py @@ -0,0 +1,204 @@ +import logging +import operator +import os +import random +import re +import subprocess +import sys +import time +import typing +from datetime import datetime + +import pendulum +import psutil + +# Plotman libraries +from plotman import \ + archive # for get_archdir_freebytes(). 
TODO: move to avoid import loop +from plotman import job, plot_util +import plotman.configuration + +# Constants +MIN = 60 # Seconds +HR = 3600 # Seconds + +MAX_AGE = 1000_000_000 # Arbitrary large number of seconds + +def dstdirs_to_furthest_phase(all_jobs: typing.List[job.Job]) -> typing.Dict[str, job.Phase]: + '''Return a map from dst dir to a phase tuple for the most progressed job + that is emitting to that dst dir.''' + result: typing.Dict[str, job.Phase] = {} + for j in all_jobs: + if not j.dstdir in result.keys() or result[j.dstdir] < j.progress(): + result[j.dstdir] = j.progress() + return result + +def dstdirs_to_youngest_phase(all_jobs: typing.List[job.Job]) -> typing.Dict[str, job.Phase]: + '''Return a map from dst dir to a phase tuple for the least progressed job + that is emitting to that dst dir.''' + result: typing.Dict[str, job.Phase] = {} + for j in all_jobs: + if j.dstdir is None: + continue + if not j.dstdir in result.keys() or result[j.dstdir] > j.progress(): + result[j.dstdir] = j.progress() + return result + +def phases_permit_new_job(phases: typing.List[job.Phase], d: str, sched_cfg: plotman.configuration.Scheduling, dir_cfg: plotman.configuration.Directories) -> bool: + '''Scheduling logic: return True if it's OK to start a new job on a tmp dir + with existing jobs in the provided phases.''' + # Filter unknown-phase jobs + phases = [ph for ph in phases if ph.known] + + if len(phases) == 0: + return True + + milestone = job.Phase( + major=sched_cfg.tmpdir_stagger_phase_major, + minor=sched_cfg.tmpdir_stagger_phase_minor, + ) + # tmpdir_stagger_phase_limit default is 1, as declared in configuration.py + if len([p for p in phases if p < milestone]) >= sched_cfg.tmpdir_stagger_phase_limit: + return False + + # Limit the total number of jobs per tmp dir. Default to the overall max + # jobs configuration, but restrict to any configured overrides. + max_plots = sched_cfg.tmpdir_max_jobs + if dir_cfg.tmp_overrides is not None and d in dir_cfg.tmp_overrides: + curr_overrides = dir_cfg.tmp_overrides[d] + if curr_overrides.tmpdir_max_jobs is not None: + max_plots = curr_overrides.tmpdir_max_jobs + if len(phases) >= max_plots: + return False + + return True + +def maybe_start_new_plot(dir_cfg: plotman.configuration.Directories, sched_cfg: plotman.configuration.Scheduling, plotting_cfg: plotman.configuration.Plotting, log_cfg: plotman.configuration.Logging) -> typing.Tuple[bool, str]: + jobs = job.Job.get_running_jobs(log_cfg.plots) + + wait_reason = None # If we don't start a job this iteration, this says why. + + youngest_job_age = min(jobs, key=job.Job.get_time_wall).get_time_wall() if jobs else MAX_AGE + global_stagger = int(sched_cfg.global_stagger_m * MIN) + if (youngest_job_age < global_stagger): + wait_reason = 'stagger (%ds/%ds)' % (youngest_job_age, global_stagger) + elif len(jobs) >= sched_cfg.global_max_jobs: + wait_reason = 'max jobs (%d) - (%ds/%ds)' % (sched_cfg.global_max_jobs, youngest_job_age, global_stagger) + else: + tmp_to_all_phases = [(d, job.job_phases_for_tmpdir(d, jobs)) for d in dir_cfg.tmp] + eligible = [ (d, phases) for (d, phases) in tmp_to_all_phases + if phases_permit_new_job(phases, d, sched_cfg, dir_cfg) ] + rankable = [ (d, phases[0]) if phases else (d, job.Phase(known=False)) + for (d, phases) in eligible ] + + if not eligible: + wait_reason = 'no eligible tempdirs (%ds/%ds)' % (youngest_job_age, global_stagger) + else: + # Plot to oldest tmpdir. 
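To make the ranking concrete: rankable above pairs each eligible tmp dir with the phase of its least-advanced job (or an unknown phase when it has none), and the max() just below picks the dir whose jobs are all furthest along. A small sketch with hypothetical values, using plain tuples in place of job.Phase:

import operator

rankable = [('/mnt/tmp/00', (2, 1)), ('/mnt/tmp/01', (3, 5)), ('/mnt/tmp/02', (1, 2))]
assert max(rankable, key=operator.itemgetter(1))[0] == '/mnt/tmp/01'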
+            tmpdir = max(rankable, key=operator.itemgetter(1))[0]
+
+            dst_dirs = dir_cfg.get_dst_directories()
+
+            dstdir: str
+            if dir_cfg.dst_is_tmp2():
+                dstdir = dir_cfg.tmp2  # type: ignore[assignment]
+            elif tmpdir in dst_dirs:
+                dstdir = tmpdir
+            elif dir_cfg.dst_is_tmp():
+                dstdir = tmpdir
+            else:
+                # Select the dst dir least recently selected
+                dir2ph = { d:ph for (d, ph) in dstdirs_to_youngest_phase(jobs).items()
+                          if d in dst_dirs and ph is not None}
+                unused_dirs = [d for d in dst_dirs if d not in dir2ph.keys()]
+                dstdir = ''
+                if unused_dirs:
+                    dstdir = random.choice(unused_dirs)
+                else:
+                    def key(key: str) -> job.Phase:
+                        return dir2ph[key]
+                    dstdir = max(dir2ph, key=key)
+
+            log_file_path = log_cfg.create_plot_log_path(time=pendulum.now())
+
+            plot_args: typing.List[str] = ['chia', 'plots', 'create',
+                    '-k', str(plotting_cfg.k),
+                    '-r', str(plotting_cfg.n_threads),
+                    '-u', str(plotting_cfg.n_buckets),
+                    '-b', str(plotting_cfg.job_buffer),
+                    '-t', tmpdir,
+                    '-d', dstdir ]
+            if plotting_cfg.e:
+                plot_args.append('-e')
+            if plotting_cfg.farmer_pk is not None:
+                plot_args.append('-f')
+                plot_args.append(plotting_cfg.farmer_pk)
+            if plotting_cfg.pool_pk is not None:
+                plot_args.append('-p')
+                plot_args.append(plotting_cfg.pool_pk)
+            if plotting_cfg.pool_contract_address is not None:
+                plot_args.append('-c')
+                plot_args.append(plotting_cfg.pool_contract_address)
+            if dir_cfg.tmp2 is not None:
+                plot_args.append('-2')
+                plot_args.append(dir_cfg.tmp2)
+            if plotting_cfg.x:
+                plot_args.append('-x')
+
+            logmsg = ('Starting plot job: %s ; logging to %s' % (' '.join(plot_args), log_file_path))
+
+            # TODO: CAMPid 09840103109429840981397487498131
+            try:
+                open_log_file = open(log_file_path, 'x')
+            except FileExistsError:
+                # The desired log file name already exists. Most likely another
+                # plotman process already launched a new process in response to
+                # the same scenario that triggered us. Let's at least not
+                # confuse things further by having two plotting processes
+                # logging to the same file. If we really should launch another
+                # plotting process, we'll get it at the next check cycle anyways.
+                message = (
+                    f'Plot log file already exists, skipping attempt to start a'
+                    f' new plot: {log_file_path!r}'
+                )
+                return (False, message)
+            except FileNotFoundError as e:
+                message = (
+                    f'Unable to open log file. Verify that the directory exists'
+                    f' and has proper write permissions: {log_file_path!r}'
+                )
+                raise Exception(message) from e
+
+            # Preferably, do not add any code between the try block above
+            # and the with block below. IOW, this space intentionally left
+            # blank... As is, this provides a good chance that our handle
+            # of the log file will get closed explicitly while still
+            # allowing handling of just the log file opening error.
+
+            if sys.platform == 'win32':
+                creationflags = subprocess.CREATE_NO_WINDOW
+                nice = psutil.BELOW_NORMAL_PRIORITY_CLASS
+            else:
+                creationflags = 0
+                nice = 15
+
+            with open_log_file:
+                # start_new_sessions to make the job independent of this controlling tty (POSIX only).
+                # subprocess.CREATE_NO_WINDOW to make the process independent of this controlling tty and have no console window on Windows.
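The mode 'x' in the open() call above is what makes log-file creation race-safe: the file is either created atomically or the call raises FileExistsError, so two plotman processes can never end up logging to the same plot log. A minimal stand-alone sketch:

import os
import tempfile

path = os.path.join(tempfile.mkdtemp(), 'plot.log')
open(path, 'x').close()      # first creator wins
try:
    open(path, 'x')          # second attempt fails atomically
except FileExistsError:
    print('already exists; skip starting a duplicate plot')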
+ p = subprocess.Popen(plot_args, + stdout=open_log_file, + stderr=subprocess.STDOUT, + start_new_session=True, + creationflags=creationflags) + + psutil.Process(p.pid).nice(nice) + return (True, logmsg) + + return (False, wait_reason) + +def select_jobs_by_partial_id(jobs: typing.List[job.Job], partial_id: str) -> typing.List[job.Job]: + selected = [] + for j in jobs: + if j.plot_id.startswith(partial_id): + selected.append(j) + return selected diff --git a/src/plotman/plot_util.py b/src/plotman/plot_util.py new file mode 100644 index 00000000..37215527 --- /dev/null +++ b/src/plotman/plot_util.py @@ -0,0 +1,142 @@ +import math +import os +import re +import shutil +import typing + +from plotman import chiapos +import plotman.job + +GB = 1_000_000_000 + +def df_b(d: str) -> int: + 'Return free space for directory (in bytes)' + usage = shutil.disk_usage(d) + return usage.free + +def get_k32_plotsize() -> int: + return get_plotsize(32) + +def get_plotsize(k: int) -> int: + return (int)(_get_plotsize_scaler(k) * k * pow(2, k)) + +def human_format(num: float, precision: int, powerOfTwo: bool = False) -> str: + divisor = 1024 if powerOfTwo else 1000 + + magnitude = 0 + while abs(num) >= divisor: + magnitude += 1 + num /= divisor + result = (('%.' + str(precision) + 'f%s') % + (num, ['', 'K', 'M', 'G', 'T', 'P'][magnitude])) + + if powerOfTwo and magnitude > 0: + result += 'i' + + return result + +def time_format(sec: typing.Optional[int]) -> str: + if sec is None: + return '-' + if sec < 60: + return '%ds' % sec + else: + return '%d:%02d' % (int(sec / 3600), int((sec % 3600) / 60)) + +def split_path_prefix(items: typing.List[str]) -> typing.Tuple[str, typing.List[str]]: + if not items: + return ('', []) + + prefix = os.path.commonpath(items) + if prefix == '/': + return ('', items) + else: + remainders = [ os.path.relpath(i, prefix) for i in items ] + return (prefix, remainders) + +def list_k32_plots(d: str) -> typing.List[str]: + 'List completed k32 plots in a directory (not recursive)' + plots = [] + for plot in os.listdir(d): + if re.match(r'^plot-k32-.*plot$', plot): + plot = os.path.join(d, plot) + try: + if os.stat(plot).st_size > (0.95 * get_k32_plotsize()): + plots.append(plot) + except FileNotFoundError: + continue + + return plots + +def column_wrap( + items: typing.Sequence[object], + n_cols: int, + filler: typing.Optional[str] = None, +) -> typing.List[typing.List[typing.Optional[object]]]: + '''Take items, distribute among n_cols columns, and return a set + of rows containing the slices of those columns.''' + rows: typing.List[typing.List[typing.Optional[object]]] = [] + n_rows = math.ceil(len(items) / n_cols) + for row in range(n_rows): + row_items = items[row : : n_rows] + # Pad and truncate + padded: typing.List[typing.Optional[object]] = [*row_items, *([filler] * n_cols)] + rows.append(list(padded[:n_cols])) + return rows + +# use k as index to get plotsize_scaler, note that 0 means the value is not calculated yet +# we can safely assume that k is never going to be greater than 100, due to the exponential nature of plot file size, this avoids using constants from chiapos +_plotsize_scaler_cache = [0.0 for _ in range(0, 101)] + +def calc_average_size_of_entry(k: int, table_index: int) -> float: + ''' + calculate the average size of entries in bytes, given k and table_index + ''' + # assumes that chia uses constant park size for each table + # it is approximately k/8, uses chia's actual park size calculation to get a more accurate estimation + return chiapos.CalculateParkSize(k, 
table_index) / chiapos.kEntriesPerPark
+
+def _get_probability_of_entries_kept(k: int, table_index: int) -> float:
+    '''
+    get the probability of entries in table of table_index that are not dropped
+    '''
+    # the formula is derived from https://www.chia.net/assets/proof_of_space.pdf, section Space Required, p5 and pt
+
+    if table_index > 5:
+        return 1
+
+    pow_2_k = 2**k
+
+    if table_index == 5:
+        # p5
+        return 1 - (1 - 2 / pow_2_k) ** pow_2_k  # type: ignore[no-any-return]
+    else:
+        # pt
+        return 1 - (1 - 2 / pow_2_k) ** (_get_probability_of_entries_kept(k, table_index + 1) * pow_2_k)  # type: ignore[no-any-return]
+
+def _get_plotsize_scaler(k: int) -> float:
+    '''
+    get scaler for plot size so that the plot size can be calculated by scaler * k * 2 ** k
+    '''
+    result = _plotsize_scaler_cache[k]
+    if result > 0:
+        return result
+    result = _get_plotsize_scaler_impl(k)
+    _plotsize_scaler_cache[k] = result
+    return result
+
+def _get_plotsize_scaler_impl(k: int) -> float:
+    '''
+    get scaler for plot size so that the plot size can be calculated by scaler * k * 2 ** k
+    '''
+
+    result = 0.0
+    # there are 7 tables
+    for i in range(1, 8):
+        probability = _get_probability_of_entries_kept(k, i)
+        average_size_of_entry = calc_average_size_of_entry(k, i)
+        scaler_for_table = probability * average_size_of_entry / k
+        result += scaler_for_table
+
+    return result
+
diff --git a/src/plotman/plotinfo.py b/src/plotman/plotinfo.py
new file mode 100644
index 00000000..b7c3a13f
--- /dev/null
+++ b/src/plotman/plotinfo.py
@@ -0,0 +1,112 @@
+import typing
+
+import attr
+import pendulum
+
+
+@attr.mutable
+class PlotInfo:
+    """Represents the results of a finished plot job"""
+    started_at: typing.Optional[pendulum.DateTime] = None
+    plot_id: str = ""
+    buckets: int = 0
+    threads: int = 0
+    buffer: int = 0
+    plot_size: int = 0
+    tmp_dir1: str = ""
+    tmp_dir2: str = ""
+    phase1_duration_raw: float = 0
+    phase2_duration_raw: float = 0
+    phase3_duration_raw: float = 0
+    phase4_duration_raw: float = 0
+    total_time_raw: float = 0
+    copy_time_raw: float = 0
+    filename: str = ""
+
+    def in_progress(self) -> bool:
+        "The plot is in progress if no total time has been reported."
+ return self.total_time == 0 + + # Phase 1 duration + @property + def phase1_duration(self) -> int: + return round(self.phase1_duration_raw) + + @property + def phase1_duration_minutes(self) -> int: + return self.duration_to_minutes(self.phase1_duration_raw) + + @property + def phase1_duration_hours(self) -> float: + return self.duration_to_hours(self.phase1_duration_raw) + + # Phase 2 duration + @property + def phase2_duration(self) -> int: + return round(self.phase2_duration_raw) + + @property + def phase2_duration_minutes(self) -> int: + return self.duration_to_minutes(self.phase2_duration_raw) + + @property + def phase2_duration_hours(self) -> float: + return self.duration_to_hours(self.phase2_duration_raw) + + # Phase 3 duration + @property + def phase3_duration(self) -> int: + return round(self.phase3_duration_raw) + + @property + def phase3_duration_minutes(self) -> int: + return self.duration_to_minutes(self.phase3_duration_raw) + + @property + def phase3_duration_hours(self) -> float: + return self.duration_to_hours(self.phase3_duration_raw) + + # Phase 4 duration + @property + def phase4_duration(self) -> int: + return round(self.phase4_duration_raw) + + @property + def phase4_duration_minutes(self) -> int: + return self.duration_to_minutes(self.phase4_duration_raw) + + @property + def phase4_duration_hours(self) -> float: + return self.duration_to_hours(self.phase4_duration_raw) + + # Total time + @property + def total_time(self) -> int: + return round(self.total_time_raw) + + @property + def total_time_minutes(self) -> int: + return self.duration_to_minutes(self.total_time_raw) + + @property + def total_time_hours(self) -> float: + return self.duration_to_hours(self.total_time_raw) + + # Copy time + @property + def copy_time(self) -> int: + return round(self.copy_time_raw) + + @property + def copy_time_minutes(self) -> int: + return self.duration_to_minutes(self.copy_time_raw) + + @property + def copy_time_hours(self) -> float: + return self.duration_to_hours(self.copy_time_raw) + + def duration_to_minutes(self, duration: float) -> int: + return round(duration / 60) + + def duration_to_hours(self, duration: float) -> float: + return round(duration / 60 / 60, 2) diff --git a/src/plotman/plotman.py b/src/plotman/plotman.py new file mode 100644 index 00000000..a63b79f7 --- /dev/null +++ b/src/plotman/plotman.py @@ -0,0 +1,333 @@ +import argparse +import datetime +import importlib +import importlib.resources +import logging +import logging.handlers +import os +import glob +import random +from shutil import copyfile +import sys +import time +import typing + +import pendulum + +# Plotman libraries +from plotman import analyzer, archive, configuration, interactive, manager, plot_util, reporting, csv_exporter +from plotman import resources as plotman_resources +from plotman.job import Job + +class PlotmanArgParser: + def add_idprefix_arg(self, subparser: argparse.ArgumentParser) -> None: + subparser.add_argument( + 'idprefix', + type=str, + nargs='+', + help='disambiguating prefix of plot ID') + + def parse_args(self) -> typing.Any: + parser = argparse.ArgumentParser(description='Chia plotting manager.') + sp = parser.add_subparsers(dest='cmd') + + sp.add_parser('version', help='print the version') + + p_status = sp.add_parser('status', help='show current plotting status') + p_status.add_argument("--json", action="store_true", + help="export status report in json format") + + sp.add_parser('prometheus', help='show current plotting status in prometheus readable format') + + 
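A hedged usage sketch of the PlotInfo duration helpers defined above, with raw seconds as they would be parsed from a "Total time = 39945.080 seconds." log line (assumes the package layout introduced in this patch):

from plotman.plotinfo import PlotInfo

info = PlotInfo()
info.total_time_raw = 39945.080
assert info.total_time == 39945        # rounded seconds
assert info.total_time_minutes == 666  # round(39945.080 / 60)
assert info.total_time_hours == 11.1   # round(39945.080 / 3600, 2)
assert not info.in_progress()          # a reported total time marks the job done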
sp.add_parser('dirs', help='show directories info') + + p_interactive = sp.add_parser('interactive', help='run interactive control/monitoring mode') + p_interactive.add_argument('--autostart-plotting', action='store_true', default=None, dest='autostart_plotting') + p_interactive.add_argument('--no-autostart-plotting', action='store_false', default=None, dest='autostart_plotting') + p_interactive.add_argument('--autostart-archiving', action='store_true', default=None, dest='autostart_archiving') + p_interactive.add_argument('--no-autostart-archiving', action='store_false', default=None, dest='autostart_archiving') + + sp.add_parser('dsched', help='print destination dir schedule') + + sp.add_parser('plot', help='run plotting loop') + + sp.add_parser('archive', help='move completed plots to farming location') + + p_export = sp.add_parser('export', help='exports metadata from the plot logs as CSV') + p_export.add_argument('-o', dest='save_to', default=None, type=str, help='save to file. Optional, prints to stdout by default') + + p_config = sp.add_parser('config', help='display or generate plotman.yaml configuration') + sp_config = p_config.add_subparsers(dest='config_subcommand') + sp_config.add_parser('generate', help='generate a default plotman.yaml file and print path') + sp_config.add_parser('path', help='show path to current plotman.yaml file') + + p_details = sp.add_parser('details', help='show details for job') + self.add_idprefix_arg(p_details) + + p_logs = sp.add_parser('logs', help='fetch the logs for job') + + p_logs.add_argument('-f', '--follow', action='store_true', help='Follow log output') + self.add_idprefix_arg(p_logs) + + p_files = sp.add_parser('files', help='show temp files associated with job') + self.add_idprefix_arg(p_files) + + p_kill = sp.add_parser('kill', help='kill job (and cleanup temp files)') + self.add_idprefix_arg(p_kill) + + p_suspend = sp.add_parser('suspend', help='suspend job') + self.add_idprefix_arg(p_suspend) + + p_resume = sp.add_parser('resume', help='resume suspended job') + self.add_idprefix_arg(p_resume) + + p_analyze = sp.add_parser('analyze', help='analyze timing stats of completed jobs') + + p_analyze.add_argument('--clipterminals', + action='store_true', + help='Ignore first and last plot in a logfile, useful for ' + 'focusing on the steady-state in a staggered parallel ' + 'plotting test (requires plotting with -n>2)') + p_analyze.add_argument('--bytmp', + action='store_true', + help='slice by tmp dirs') + p_analyze.add_argument('--bybitfield', + action='store_true', + help='slice by bitfield/non-bitfield sorting') + p_analyze.add_argument('--logfile', type=str, nargs='+', default=None, + help='logfile(s) to analyze') + p_analyze.add_argument('--logdir', type=str, default=None, + help='directory containing multiple logfiles to analyze') + p_analyze.add_argument('--figfile', type=str, default=None, + help='figure to be created if logdir is passed') + + args = parser.parse_args() + return args + +def get_term_width() -> int: + try: + (rows_string, columns_string) = os.popen('stty size', 'r').read().split() + columns = int(columns_string) + except: + columns = 120 # 80 is typically too narrow. TODO: make a command line arg. 
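For comparison, the standard-library route would be shutil.get_terminal_size(), sketched below; the stty call above is kept on purpose, since curses can miss SIGWINCH resize updates (see the use_stty_size note in the sample plotman.yaml later in this patch).

import shutil

# Returns the fallback when stdout is not attached to a terminal.
columns = shutil.get_terminal_size(fallback=(120, 25)).columns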
+    return columns
+
+class Iso8601Formatter(logging.Formatter):
+    def formatTime(self, record: logging.LogRecord, datefmt: typing.Optional[str] = None) -> str:
+        time = pendulum.from_timestamp(timestamp=record.created, tz='local')
+        return time.isoformat(timespec='microseconds')
+
+def main() -> None:
+    random.seed()
+
+    pm_parser = PlotmanArgParser()
+    args = pm_parser.parse_args()
+
+    if args.cmd == 'version':
+        import pkg_resources
+        print(pkg_resources.get_distribution('plotman'))
+        return
+
+    elif args.cmd == 'config':
+        config_file_path = configuration.get_path()
+        if args.config_subcommand == 'path':
+            if os.path.isfile(config_file_path):
+                print(config_file_path)
+                return
+            print(f"No 'plotman.yaml' file exists at expected location: '{config_file_path}'")
+            print(f"To generate a default config file, run: 'plotman config generate'")
+            return
+        if args.config_subcommand == 'generate':
+            if os.path.isfile(config_file_path):
+                overwrite = None
+                while overwrite not in {"y", "n"}:
+                    overwrite = input(
+                        f"A 'plotman.yaml' file already exists at the default location: '{config_file_path}' \n\n"
+                        "\tInput 'y' to overwrite existing file, or 'n' to exit without overwrite."
+                    ).lower()
+                if overwrite == 'n':
+                    print("\nExited without overwriting file")
+                    return
+
+            # Copy the default plotman.yaml (packaged in plotman/resources/) to the user's config file path,
+            # creating the parent plotman file/directory if it does not yet exist
+            with importlib.resources.path(plotman_resources, "plotman.yaml") as default_config:
+                config_dir = os.path.dirname(config_file_path)
+
+                os.makedirs(config_dir, exist_ok=True)
+                copyfile(default_config, config_file_path)
+                print(f"\nWrote default plotman.yaml to: {config_file_path}")
+                return
+
+        if not args.config_subcommand:
+            print("No action requested, add 'generate' or 'path'.")
+            return
+
+    config_path = configuration.get_path()
+    config_text = configuration.read_configuration_text(config_path)
+    preset_target_definitions_text = importlib.resources.read_text(
+        plotman_resources, "target_definitions.yaml",
+    )
+
+    cfg = configuration.get_validated_configs(config_text, config_path, preset_target_definitions_text)
+
+    with cfg.setup():
+        root_logger = logging.getLogger()
+        handler = logging.handlers.RotatingFileHandler(
+            backupCount=10,
+            encoding='utf-8',
+            filename=cfg.logging.application,
+            maxBytes=10_000_000,
+        )
+        formatter = Iso8601Formatter(fmt='%(asctime)s: %(message)s')
+        handler.setFormatter(formatter)
+        root_logger.addHandler(handler)
+        root_logger.setLevel(logging.INFO)
+        root_logger.info('abc')
+
+        #
+        # Stay alive, spawning plot jobs
+        #
+        if args.cmd == 'plot':
+            print('...starting plot loop')
+            while True:
+                wait_reason = manager.maybe_start_new_plot(cfg.directories, cfg.scheduling, cfg.plotting, cfg.logging)
+
+                # TODO: report this via a channel that can be polled on demand, so we don't spam the console
+                if wait_reason:
+                    print('...sleeping %d s: %s' % (cfg.scheduling.polling_time_s, wait_reason))
+
+                time.sleep(cfg.scheduling.polling_time_s)
+
+        #
+        # Analysis of completed jobs
+        #
+        elif args.cmd == 'analyze':
+
+            analyzer.analyze(args.logfile, args.clipterminals,
+                args.bytmp, args.bybitfield)
+
+        #
+        # Exports log metadata to CSV
+        #
+        elif args.cmd == 'export':
+            logfilenames = glob.glob(os.path.join(cfg.logging.plots, '*.plot.log'))
+            if args.save_to is None:
+                csv_exporter.generate(logfilenames=logfilenames, file=sys.stdout)
+            else:
+                with open(args.save_to, 'w', encoding='utf-8') as file:
csv_exporter.generate(logfilenames=logfilenames, file=file) + + else: + jobs = Job.get_running_jobs(cfg.logging.plots) + + # Status report + if args.cmd == 'status': + if args.json: + # convert jobs list into json + result = reporting.json_report(jobs) + else: + result = "{0}\n\n{1}\n\nUpdated at: {2}".format( + reporting.status_report(jobs, get_term_width()), + reporting.summary(jobs), + datetime.datetime.today().strftime("%c"), + ) + print(result) + + # Prometheus report + if args.cmd == 'prometheus': + print(reporting.prometheus_report(jobs)) + + # Directories report + elif args.cmd == 'dirs': + print(reporting.dirs_report(jobs, cfg.directories, cfg.archiving, cfg.scheduling, get_term_width())) + + elif args.cmd == 'interactive': + interactive.run_interactive( + cfg=cfg, + autostart_plotting=args.autostart_plotting, + autostart_archiving=args.autostart_archiving, + ) + + # Start running archival + elif args.cmd == 'archive': + if cfg.archiving is None: + print('archiving not configured but is required for this command') + else: + print('...starting archive loop') + firstit = True + while True: + if not firstit: + print('Sleeping 60s until next iteration...') + time.sleep(60) + jobs = Job.get_running_jobs(cfg.logging.plots) + firstit = False + + archiving_status, log_messages = archive.spawn_archive_process(cfg.directories, cfg.archiving, cfg.logging, jobs) + for log_message in log_messages: + print(log_message) + + + # Debugging: show the destination drive usage schedule + elif args.cmd == 'dsched': + for (d, ph) in manager.dstdirs_to_furthest_phase(jobs).items(): + print(' %s : %s' % (d, str(ph))) + + # + # Job control commands + # + elif args.cmd in [ 'details', 'logs', 'files', 'kill', 'suspend', 'resume' ]: + print(args) + + selected = [] + + # TODO: clean up treatment of wildcard + if args.idprefix[0] == 'all': + selected = jobs + else: + # TODO: allow multiple idprefixes, not just take the first + selected = manager.select_jobs_by_partial_id(jobs, args.idprefix[0]) + if (len(selected) == 0): + print('Error: %s matched no jobs.' % args.idprefix[0]) + elif len(selected) > 1: + print('Error: "%s" matched multiple jobs:' % args.idprefix[0]) + for j in selected: + print(' %s' % j.plot_id) + selected = [] + + for job in selected: + if args.cmd == 'details': + print(job.status_str_long()) + + elif args.cmd == 'logs': + job.print_logs(args.follow) + + elif args.cmd == 'files': + temp_files = job.get_temp_files() + for f in temp_files: + print(' %s' % f) + + elif args.cmd == 'kill': + # First suspend so job doesn't create new files + print('Pausing PID %d, plot id %s' % (job.proc.pid, job.plot_id)) + job.suspend() + + temp_files = job.get_temp_files() + print('Will kill pid %d, plot id %s' % (job.proc.pid, job.plot_id)) + print('Will delete %d temp files' % len(temp_files)) + conf = input('Are you sure? ("y" to confirm): ') + if (conf != 'y'): + print('canceled. If you wish to resume the job, do so manually.') + else: + print('killing...') + job.cancel() + print('cleaning up temp files...') + for f in temp_files: + os.remove(f) + + elif args.cmd == 'suspend': + print('Suspending ' + job.plot_id) + job.suspend() + elif args.cmd == 'resume': + print('Resuming ' + job.plot_id) + job.resume() diff --git a/src/plotman/reporting.py b/src/plotman/reporting.py new file mode 100644 index 00000000..0a74d84d --- /dev/null +++ b/src/plotman/reporting.py @@ -0,0 +1,275 @@ +import time +import json +import math +import os +import typing + +import psutil +import texttable as tt # from somewhere? 
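On the "# from somewhere?" note above: texttable is the third-party `texttable` package from PyPI, and the reports below drive it through header()/add_row()/draw(). A tiny sketch of that API as used here (exact rendering depends on the installed version):

import texttable as tt

tab = tt.Texttable()
tab.header(['tmp', 'ready'])
tab.add_row(['/mnt/tmp/00', 'OK'])
tab.set_deco(0)     # no borders, matching the reports below
print(tab.draw())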
+from itertools import groupby
+from plotman import archive, configuration, job, manager, plot_util
+
+
+def abbr_path(path: str, putative_prefix: str) -> str:
+    if putative_prefix and path.startswith(putative_prefix):
+        return os.path.relpath(path, putative_prefix)
+    else:
+        return path
+
+def phases_str(phases: typing.List[job.Phase], max_num: typing.Optional[int] = None) -> str:
+    '''Take a list of phase-subphase pairs and return them as a compact string'''
+    if not max_num or len(phases) <= max_num:
+        return ' '.join([str(pair) for pair in phases])
+    else:
+        n_first = math.floor(max_num / 2)
+        n_last = max_num - n_first
+        n_elided = len(phases) - (n_first + n_last)
+        first = ' '.join([str(pair) for pair in phases[:n_first]])
+        elided = " [+%d] " % n_elided
+        last = ' '.join([str(pair) for pair in phases[n_first + n_elided:]])
+        return first + elided + last
+
+def n_at_ph(jobs: typing.List[job.Job], ph: job.Phase) -> int:
+    return sum([1 for j in jobs if j.progress() == ph])
+
+def n_to_char(n: int) -> str:
+    n_to_char_map = dict(enumerate(" .:;!"))
+
+    if n < 0:
+        return 'X'  # Should never be negative
+    elif n >= len(n_to_char_map):
+        n = len(n_to_char_map) - 1
+
+    return n_to_char_map[n]
+
+def job_viz(jobs: typing.List[job.Job]) -> str:
+    # TODO: Rewrite this in a way that ensures we count every job
+    # even if the reported phases don't line up with expectations.
+    result = ''
+    result += '1'
+    for i in range(0, 8):
+        result += n_to_char(n_at_ph(jobs, job.Phase(1, i)))
+    result += '2'
+    for i in range(0, 8):
+        result += n_to_char(n_at_ph(jobs, job.Phase(2, i)))
+    result += '3'
+    for i in range(0, 7):
+        result += n_to_char(n_at_ph(jobs, job.Phase(3, i)))
+    result += '4'
+    result += n_to_char(n_at_ph(jobs, job.Phase(4, 0)))
+    return result
+
+# Command: plotman status
+# Shows a general overview of all running jobs
+def status_report(jobs: typing.List[job.Job], width: int, height: typing.Optional[int] = None, tmp_prefix: str = '', dst_prefix: str = '') -> str:
+    '''height, if provided, will limit the number of rows in the table,
+    showing first and last rows, row numbers and an ellipsis in the middle.'''
+    abbreviate_jobs_list = False
+    n_begin_rows = 0
+    n_end_rows = 0
+    if height and height < len(jobs) + 1:  # One row for header
+        abbreviate_jobs_list = True
+
+        n_rows = height - 2  # Minus one for header, one for ellipsis
+        n_begin_rows = int(n_rows / 2)
+        n_end_rows = n_rows - n_begin_rows
+
+    tab = tt.Texttable()
+    headings = ['plot id', 'k', 'tmp', 'dst', 'wall', 'phase', 'tmp',
+                'pid', 'stat', 'mem', 'user', 'sys', 'io']
+    if height:
+        headings.insert(0, '#')
+    tab.header(headings)
+    tab.set_cols_dtype('t' * len(headings))
+    tab.set_cols_align('r' * len(headings))
+    tab.set_header_align('r' * len(headings))
+
+    for i, j in enumerate(sorted(jobs, key=job.Job.get_time_wall)):
+        # Ellipsis row
+        if abbreviate_jobs_list and i == n_begin_rows:
+            row = ['...'] + ([''] * (len(headings) - 1))
+        # Omitted row
+        elif abbreviate_jobs_list and i > n_begin_rows and i < (len(jobs) - n_end_rows):
+            continue
+
+        # Regular row
+        else:
+            try:
+                with j.proc.oneshot():
+                    row = [j.plot_id[:8],  # Plot ID
+                           str(j.k),  # k size
+                           abbr_path(j.tmpdir, tmp_prefix),  # Temp directory
+                           abbr_path(j.dstdir, dst_prefix),  # Destination directory
+                           plot_util.time_format(j.get_time_wall()),  # Time wall
+                           str(j.progress()),  # Overall progress (major:minor)
+                           plot_util.human_format(j.get_tmp_usage(), 0),  # Current temp file size
+                           j.proc.pid,  # System pid
+                           j.get_run_status(),  # OS status for the job process
plot_util.human_format(j.get_mem_usage(), 1, True), # Memory usage + plot_util.time_format(j.get_time_user()), # user system time + plot_util.time_format(j.get_time_sys()), # system time + plot_util.time_format(j.get_time_iowait()) # io wait + ] + except (psutil.NoSuchProcess, psutil.AccessDenied): + # In case the job has disappeared + row = [j.plot_id[:8]] + (['--'] * 12) + + if height: + row.insert(0, '%3d' % i) + + tab.add_row(row) + + tab.set_max_width(width) + tab.set_deco(0) # No borders + + return tab.draw() # type: ignore[no-any-return] + +def to_prometheus_format(metrics: typing.Dict[str, str], prom_stati: typing.Sequence[typing.Tuple[str, typing.Mapping[str, typing.Optional[int]]]]) -> typing.List[str]: + prom_str_list = [] + for metric_name, metric_desc in metrics.items(): + prom_str_list.append(f'# HELP {metric_name} {metric_desc}.') + prom_str_list.append(f'# TYPE {metric_name} gauge') + for label_str, values in prom_stati: + prom_str_list.append('%s{%s} %s' % (metric_name, label_str, values[metric_name])) + return prom_str_list + +def prometheus_report(jobs: typing.List[job.Job], tmp_prefix: str = '', dst_prefix: str = '') -> str: + metrics = { + 'plotman_plot_phase_major': 'The phase the plot is currently in', + 'plotman_plot_phase_minor': 'The part of the phase the plot is currently in', + 'plotman_plot_tmp_usage': 'Tmp dir usage in bytes', + 'plotman_plot_mem_usage': 'Memory usage in bytes', + 'plotman_plot_user_time': 'Processor time (user) in s', + 'plotman_plot_sys_time': 'Processor time (sys) in s', + 'plotman_plot_iowait_time': 'Processor time (iowait) in s', + } + prom_stati = [] + for j in jobs: + labels = { + 'plot_id': j.plot_id[:8], + 'tmp_dir': abbr_path(j.tmpdir, tmp_prefix), + 'dst_dir': abbr_path(j.dstdir, dst_prefix), + 'run_status': j.get_run_status(), + 'phase': str(j.progress()), + } + label_str = ','.join([f'{k}="{v}"' for k, v in labels.items()]) + values = { + 'plotman_plot_phase_major': j.progress().major, + 'plotman_plot_phase_minor': j.progress().minor, + 'plotman_plot_tmp_usage': j.get_tmp_usage(), + 'plotman_plot_mem_usage': j.get_mem_usage(), + 'plotman_plot_user_time': j.get_time_user(), + 'plotman_plot_sys_time': j.get_time_sys(), + 'plotman_plot_iowait_time': j.get_time_iowait(), + } + prom_stati += [(label_str, values)] + return '\n'.join(to_prometheus_format(metrics, prom_stati)) + +def summary(jobs: typing.List[job.Job], tmp_prefix: str = '') -> str: + """Creates a small summary of running jobs""" + + summary = [ + 'Total jobs: {0}'.format(len(jobs)) + ] + + # Number of jobs in each tmp disk + tmp_dir_paths = sorted([abbr_path(job.tmpdir, tmp_prefix) for job in jobs]) + for key, group in groupby(tmp_dir_paths, lambda dir: dir): + summary.append( + 'Jobs in {0}: {1}'.format(key, len(list(group))) + ) + + return '\n'.join(summary) + +def tmp_dir_report(jobs: typing.List[job.Job], dir_cfg: configuration.Directories, sched_cfg: configuration.Scheduling, width: int, start_row: typing.Optional[int] = None, end_row: typing.Optional[int] = None, prefix: str = '') -> str: + '''start_row, end_row let you split the table up if you want''' + tab = tt.Texttable() + headings = ['tmp', 'ready', 'phases'] + tab.header(headings) + tab.set_cols_dtype('t' * len(headings)) + tab.set_cols_align('r' * (len(headings) - 1) + 'l') + for i, d in enumerate(sorted(dir_cfg.tmp)): + if (start_row and i < start_row) or (end_row and i >= end_row): + continue + phases = sorted(job.job_phases_for_tmpdir(d, jobs)) + ready = manager.phases_permit_new_job(phases, d, sched_cfg, 
dir_cfg) + row = [abbr_path(d, prefix), 'OK' if ready else '--', phases_str(phases, 5)] + tab.add_row(row) + + tab.set_max_width(width) + tab.set_deco(tt.Texttable.BORDER | tt.Texttable.HEADER ) + tab.set_deco(0) # No borders + return tab.draw() # type: ignore[no-any-return] + +def dst_dir_report(jobs: typing.List[job.Job], dstdirs: typing.List[str], width: int, prefix: str='') -> str: + tab = tt.Texttable() + dir2oldphase = manager.dstdirs_to_furthest_phase(jobs) + dir2newphase = manager.dstdirs_to_youngest_phase(jobs) + headings = ['dst', 'plots', 'GBfree', 'inbnd phases', 'pri'] + tab.header(headings) + tab.set_cols_dtype('t' * len(headings)) + + for d in sorted(dstdirs): + # TODO: This logic is replicated in archive.py's priority computation, + # maybe by moving more of the logic in to directory.py + eldest_ph = dir2oldphase.get(d, job.Phase(0, 0)) + phases = job.job_phases_for_dstdir(d, jobs) + + dir_plots = plot_util.list_k32_plots(d) + gb_free = int(plot_util.df_b(d) / plot_util.GB) + n_plots = len(dir_plots) + priority = archive.compute_priority(eldest_ph, gb_free, n_plots) + row = [abbr_path(d, prefix), n_plots, gb_free, + phases_str(phases, 5), priority] + tab.add_row(row) + tab.set_max_width(width) + tab.set_deco(tt.Texttable.BORDER | tt.Texttable.HEADER ) + tab.set_deco(0) # No borders + return tab.draw() # type: ignore[no-any-return] + +def arch_dir_report(archdir_freebytes: typing.Dict[str, int], width: int, prefix: str = '') -> str: + cells = ['%s:%5dG' % (abbr_path(d, prefix), int(int(space) / plot_util.GB)) + for (d, space) in sorted(archdir_freebytes.items())] + if not cells: + return '' + + n_columns = int(width / (len(max(cells, key=len)) + 3)) + tab = tt.Texttable() + tab.set_max_width(width) + for row in plot_util.column_wrap(cells, n_columns, filler=''): + tab.add_row(row) + tab.set_cols_align('r' * (n_columns)) + tab.set_deco(tt.Texttable.VLINES) + return tab.draw() # type: ignore[no-any-return] + +# TODO: remove this +def dirs_report(jobs: typing.List[job.Job], dir_cfg: configuration.Directories, arch_cfg: typing.Optional[configuration.Archiving], sched_cfg: configuration.Scheduling, width: int) -> str: + dst_dir = dir_cfg.get_dst_directories() + reports = [ + tmp_dir_report(jobs, dir_cfg, sched_cfg, width), + dst_dir_report(jobs, dst_dir, width), + ] + if arch_cfg is not None: + freebytes, archive_log_messages = archive.get_archdir_freebytes(arch_cfg) + reports.extend([ + 'archive dirs free space:', + arch_dir_report(freebytes, width), + *archive_log_messages, + ]) + + return '\n'.join(reports) + '\n' + +def json_report(jobs: typing.List[job.Job]) -> str: + jobs_dicts = [] + for j in sorted(jobs, key=job.Job.get_time_wall): + with j.proc.oneshot(): + jobs_dicts.append(j.to_dict()) + + stuff = { + "jobs": jobs_dicts, + "total_jobs": len(jobs), + "updated": time.time(), + } + + return json.dumps(stuff) + diff --git a/src/plotman/resources/__init__.py b/src/plotman/resources/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/plotman/resources/plotman.yaml b/src/plotman/resources/plotman.yaml new file mode 100644 index 00000000..09ee52ad --- /dev/null +++ b/src/plotman/resources/plotman.yaml @@ -0,0 +1,150 @@ +# Default/example plotman.yaml configuration file + +# https://github.com/ericaltendorf/plotman/wiki/Configuration#versions +version: [1] + +logging: + # One directory in which to store all plot job logs (the STDOUT/ + # STDERR of all plot jobs). 
In order to monitor progress, plotman
+  # reads these logs on a regular basis, so using a fast drive is
+  # recommended.
+  plots: /home/chia/chia/logs
+  # transfers:
+  # application:
+
+# Options for display and rendering
+user_interface:
+  # Call out to the `stty` program to determine terminal size, instead of
+  # relying on what is reported by the curses library. In some cases,
+  # the curses library fails to update on SIGWINCH signals. If the
+  # `plotman interactive` curses interface does not properly adjust when
+  # you resize the terminal window, you can try setting this to True.
+  use_stty_size: True
+
+# Optional custom settings for the subcommands (status, interactive, etc.)
+commands:
+  interactive:
+    # Set it to False if you don't want to auto start plotting when 'interactive' is run.
+    # You can override this value from the command line, type "plotman interactive -h" for details
+    autostart_plotting: True
+    autostart_archiving: True
+
+# Where to plot and log.
+directories:
+  # One or more directories to use as tmp dirs for plotting. The
+  # scheduler will use all of them and distribute jobs among them.
+  # It assumes that IO is independent for each one (i.e., that each
+  # one is on a different physical device).
+  #
+  # If multiple directories share a common prefix, reports will
+  # abbreviate and show just the uniquely identifying suffix.
+  tmp:
+    - /mnt/tmp/00
+    - /mnt/tmp/01
+    - /mnt/tmp/02
+    - /mnt/tmp/03
+
+  # Optional: Allows overriding some characteristics of certain tmp
+  # directories. This contains a map of tmp directory names to
+  # attributes. If a tmp directory and attribute is not listed here,
+  # it uses the default attribute setting from the main configuration.
+  #
+  # Currently supported override parameters:
+  #   - tmpdir_max_jobs
+  tmp_overrides:
+    # In this example, /mnt/tmp/00 is larger than the other tmp
+    # dirs and it can hold more plots than the default.
+    "/mnt/tmp/00":
+      tmpdir_max_jobs: 5
+
+  # Optional: tmp2 directory. If specified, will be passed to
+  # chia plots create as -2. Only one tmp2 directory is supported.
+  # tmp2: /mnt/tmp/a
+
+  # Optional: A list of one or more directories; the scheduler will
+  # use all of them. These again are presumed to be on independent
+  # physical devices so writes (plot jobs) and reads (archivals) can
+  # be scheduled to minimize IO contention.
+  #
+  # If dst is commented out, the tmp directories will be used as the
+  # buffer.
+  dst:
+    - /mnt/dst/00
+    - /mnt/dst/01
+
+# Archival configuration. Optional; if you do not wish to run the
+# archiving operation, comment this section out. Almost everyone
+# should be using the archival feature. It is meant to distribute
+# plots among multiple disks filling them all. This can be done both
+# to local and to remote disks.
+#
+# As of v0.4, archiving commands are highly configurable. The basic
+# configuration consists of a script for checking available disk space
+# and another for actually transferring plots. Each can be specified
+# as either a path to an existing script or inline script contents.
+# It is expected that most people will use existing recipes and will
+# adjust them by specifying environment variables that will set their
+# system specific values. These can be provided to the scripts via
+# the `env` key. plotman will additionally provide `source` and
+# `destination` environment variables to the transfer script so it
+# knows the specifically selected items to process. plotman also needs
plotman also needs
+# to be able to generally detect if a transfer process is already
+# running. To be able to identify externally launched transfers, the
+# process name and an argument prefix to match must be provided. Note
+# that variable substitution of environment variables, including those
+# specified in the `env` key, can be used in both the process name and
+# the process argument prefix elements, but that they use the Python
+# substitution format.
+#
+# Complete example: https://github.com/ericaltendorf/plotman/wiki/Archiving
+archiving:
+        target: local_rsync
+        env:
+                command: rsync
+                site_root: /farm/sites
+
+# Plotting scheduling parameters
+scheduling:
+        # Run a job on a particular temp dir only if the number of existing jobs
+        # before [tmpdir_stagger_phase_major : tmpdir_stagger_phase_minor]
+        # is less than tmpdir_stagger_phase_limit.
+        # Phase major corresponds to the plot phase, phase minor corresponds to
+        # the table or table pair in sequence, and phase limit corresponds to
+        # the number of plots allowed before [phase major : phase minor].
+        # e.g., with default settings, a new plot will start only when your plot
+        # reaches phase [2 : 1] on your temp drive. This setting takes precedence
+        # over global_stagger_m.
+        tmpdir_stagger_phase_major: 2
+        tmpdir_stagger_phase_minor: 1
+        # Optional: default is 1
+        tmpdir_stagger_phase_limit: 1
+
+        # Don't run more than this many jobs at a time on a single temp dir.
+        tmpdir_max_jobs: 3
+
+        # Don't run more than this many jobs at a time in total.
+        global_max_jobs: 12
+
+        # Don't run any jobs (across all temp dirs) more often than this, in minutes.
+        global_stagger_m: 30
+
+        # How often the daemon wakes to consider starting a new plot job, in seconds.
+        polling_time_s: 20
+
+
+# Plotting parameters. These are pass-through parameters to chia plots create.
+# See documentation at
+# https://github.com/Chia-Network/chia-blockchain/wiki/CLI-Commands-Reference#create
+plotting:
+        k: 32
+        e: False              # Use -e plotting option
+        n_threads: 2          # Threads per job
+        n_buckets: 128        # Number of buckets to split data into
+        job_buffer: 3389      # Per job memory
+        # If specified, pass through to the -f and -p options. See CLI reference.
+        # farmer_pk: ...
+        # pool_pk: ...
+        # If true, skips adding [final dir] / dst to harvester for farming.
+        # Especially useful if you have harvesters that are running somewhere else
+        # and you are just plotting on the machine where plotman is running.
+        # x: True
diff --git a/src/plotman/resources/target_definitions.yaml b/src/plotman/resources/target_definitions.yaml
new file mode 100644
index 00000000..56c0951f
--- /dev/null
+++ b/src/plotman/resources/target_definitions.yaml
@@ -0,0 +1,64 @@
+target_definitions:
+  local_rsync:
+    env:
+      command: rsync
+      options: --preallocate --remove-source-files --skip-compress plot --whole-file
+      site_root: null
+
+    # The disk space script must return a line for each directory
+    # to consider archiving to, with the following form:
+    #
+    # /some/path:1000000000000
+    #
+    # That line tells plotman that it should consider archiving
+    # plots to files at paths such as /some/path/theplotid.plot and
+    # that there is 1TB of space available for use in that
+    # directory.
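Since that path:bytes line is the entire protocol between the disk space script and plotman, parsing it takes only a few lines. An illustrative sketch (not the parsing code plotman itself ships):

    # Illustrative parser for the `path:free-bytes` lines described above;
    # rpartition splits on the last colon, so colons inside the path survive.
    import typing

    def parse_disk_space_line(line: str) -> typing.Tuple[str, int]:
        path, _, free_bytes = line.strip().rpartition(":")
        return path, int(free_bytes)

    assert parse_disk_space_line("/some/path:1000000000000") == ("/some/path", 1000000000000)
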
+ disk_space_script: | + #!/bin/bash + set -evx + site_root_stripped=$(echo "${site_root}" | sed 's;/\+$;;') + # printf with %.0f used to handle mawk such as in Ubuntu Docker images + # otherwise it saturates and you get saturated sizes like 2147483647 + df -aBK | grep " ${site_root_stripped}/" | awk '{ gsub(/K$/,"",$4); printf "%s:%.0f\n", $6, $4*1024 }' + transfer_script: | + #!/bin/bash + set -evx + "${command}" ${options} "${source}" "${destination}" + transfer_process_name: "{command}" + transfer_process_argument_prefix: "{site_root}" + rsyncd: + env: + # A value of null indicates a mandatory option + command: rsync + options: --bwlimit=80000 --preallocate --remove-source-files --skip-compress plot --whole-file + rsync_port: 873 + ssh_port: 22 + user: null + host: null + site_root: null + site: null + disk_space_script: | + #!/bin/bash + set -evx + site_root_stripped=$(echo "${site_root}" | sed 's;/\+$;;') + # printf with %.0f used to handle mawk such as in Ubuntu Docker images + # otherwise it saturates and you get saturated sizes like 2147483647 + ssh -p "${ssh_port}" "${user}@${host}" "df -aBK | grep \" $(echo "${site_root_stripped}" | sed 's;/\+$;;')/\" | awk '{ gsub(/K\$/,\"\",\$4); printf \"%s:%.0f\n\", \$6, \$4*1024 }'" + transfer_script: | + #!/bin/bash + set -evx + echo Launching transfer activity + relative_path=$(realpath --canonicalize-missing --relative-to="${site_root}" "${destination}") + url_root="rsync://${user}@${host}:${rsync_port}/${site}" + "${command}" ${options} "${source}" "${url_root}/${relative_path}" + transfer_process_name: "{command}" + transfer_process_argument_prefix: "rsync://{user}@{host}:{rsync_port}/{site}" +# external_script: +# env: +# some_common_value_with_a_default: /a/path +# some_mandatory option: null +# disk_space_path: /home/me/my_disk_space_script.sh +# transfer_path: /home/me/my_transfer_script.sh +# transfer_process_name: rsync +# transfer_process_argument_prefix: /the/destination/directory/root diff --git a/tox.ini b/tox.ini new file mode 100644 index 00000000..77170cb1 --- /dev/null +++ b/tox.ini @@ -0,0 +1,36 @@ +[tox] +envlist = test-py{37,38,39} + +[testenv] +changedir = {envtmpdir} +setenv = + COVERAGE_FILE={toxinidir}/.coverage + +[testenv:test-py{37,38,39}] +extras = + test +commands = + pytest --capture=no --verbose --cov=plotman --cov-report=term-missing --cov-report=xml:{toxinidir}/coverage.xml --pyargs plotman + +[testenv:check-manifest] +extras = + checks +commands = + check-manifest --verbose {toxinidir} + +[testenv:check-coverage] +changedir = {toxinidir} +extras = + coverage +commands = + coverage combine coverage_reports/ + coverage xml -o coverage.xml + coverage report --fail-under=35 --ignore-errors --show-missing + diff-cover --fail-under=100 {posargs:--compare-branch=development} coverage.xml + +[testenv:check-hints-py{37,38,39}] +changedir = {toxinidir} +extras = + checks +commands = + mypy --package plotman diff --git a/util/listlogs b/util/listlogs new file mode 100644 index 00000000..e83ea776 --- /dev/null +++ b/util/listlogs @@ -0,0 +1,79 @@ +#!/bin/bash +#must run with bash or <<< redirections won't work +#created by @scry +if [[ $@ == *"-h"* || $@ == *"--help"* ]]; then + echo 'listlogs ' + echo ' ' + echo 'example: listlogs 2021-03-17-*' + echo ' ' + echo 'listlogs lists complete chia plot log times and filenames' + echo ' -h --help This screen' + echo ' -d List just filenames' + echo ' -t List just total times' + echo ' -T List just phase 1 times' + echo ' -s Sum of total times' + echo ' -S Sum of phase 1 
times' + echo ' -c Return the count of logs with total times' + echo ' -a Return average of total times' + echo ' -A Return average of phase 1 times' + exit +fi +if [[ $@ == *"-A"* ]]; then + opt="-A" + in=${@#"$opt"} + sum=$(grep -iR "Time for phase 1" $in | cut -d' ' -f6 | paste -sd+ - | bc) + len=$(grep -iR "Time for phase 1" $in | wc -l) + bc <<< "scale=2; $sum/$len" + exit +fi +if [[ $@ == *"-a"* ]]; then + opt="-a" + foo=${@#"$opt"} + sum=$(grep -iR "Total time" $foo | cut -d' ' -f4 | paste -sd+ - | bc) + len=$(grep -iR "Total time" $foo | wc -l) + bc <<< "scale=2; $sum/$len" + exit +fi +if [[ $@ == *"-C"* ]]; then + opt="-C" + in=${@#"$opt"} + grep -iR "Time for phase 1" $in | wc -l + exit +fi +if [[ $@ == *"-c"* ]]; then + opt="-c" + in=${@#"$opt"} + grep -iR "Total time" $in | wc -l + exit +fi +if [[ $@ == *"-S"* ]]; then + opt="-S" + in=${@#"$opt"} + grep -iR "Time for phase 1" $in | cut -d' ' -f6 | paste -sd+ - | bc + exit +fi +if [[ $@ == *"-s"* ]]; then + opt="-s" + in=${@#"$opt"} + grep -iR "Total time" $in | cut -d' ' -f4 | paste -sd+ - | bc + exit +fi +if [[ $@ == *"-T"* ]]; then + opt="-T" + in=${@#"$opt"} + grep -iR "Time for phase 1" $in | cut -d' ' -f6 + exit +fi +if [[ $@ == *"-t"* ]]; then + opt="-t" + in=${@#"$opt"} + grep -iR "Total time" $in | cut -d' ' -f4 + exit +fi +if [[ $@ == *"-d"* ]]; then + opt="-d" + in=${@#"$opt"} + grep -iR "Total time" $in | cut -d: -f1,2,3 | cut -d/ -f3 + exit +fi +grep -iR "Total time" $@ From e909d1bf9d5c9ae579eac1d1268bc732e1d480d1 Mon Sep 17 00:00:00 2001 From: CountingShe3p Date: Mon, 21 Jun 2021 21:23:22 -0400 Subject: [PATCH 15/24] migrated graph --- setup.cfg | 3 + src/plotman/graph.py | 263 +++++++++++++++++++++++++++++++++++++++++ src/plotman/plotman.py | 18 +++ 3 files changed, 284 insertions(+) create mode 100644 src/plotman/graph.py diff --git a/setup.cfg b/setup.cfg index f922b21e..9d8f162e 100644 --- a/setup.cfg +++ b/setup.cfg @@ -71,6 +71,9 @@ checks = mypy == 0.902 types-pkg_resources ~= 0.1.2 %(test)s +graph = + matplotlib ~= 3.4 + numpy ~= 1.20 [options.data_files] config = src/plotman/resources/plotman.yaml diff --git a/src/plotman/graph.py b/src/plotman/graph.py new file mode 100644 index 00000000..8ff7f301 --- /dev/null +++ b/src/plotman/graph.py @@ -0,0 +1,263 @@ +import os +import time, datetime +import re +import statistics +import sys +import argparse + +import numpy as np + +import matplotlib +import matplotlib.pyplot as plt + + +def create_ax_dumbbell(ax, data, max_stacked=50): + ''' + Create a dumbbell plot of concurrent plot instances over time. + Parameters: + ax: a matplotlib axis. + data: numpy arrary with [start times, end times]. + ''' + + def newline(p1, p2, color='r'): + l = matplotlib.lines.Line2D([p1[0],p2[0]], [p1[1],p2[1]], color=color) + ax.add_line(l) + return l + + # Prevent the stack from growing to tall + num_rows = data.shape[0] + stacker = [] + for _ in range(int(np.ceil(num_rows / float(max_stacked)))): + stacker.extend(list(range(max_stacked))) + stacker = np.array(stacker) + if num_rows % float(max_stacked) != 0: + stacker = stacker[:-(max_stacked-int(num_rows % float(max_stacked)))] + + for (p1, p2), i in zip(data[:,:2], stacker): + newline([p1, i], [p2, i]) + ax.scatter(data[:,0], stacker, color='b') + ax.scatter(data[:,1], stacker, color='b') + + ax.set_ylabel('Plots') + ax.set_xlim(np.min(data[:,0])-2, np.max(data[:,1])+2) + + +def create_ax_plotrate(ax, data, end=True, window=3): + ''' + Create a plot showing the rate of plotting over time. 
Can be computed + with respect to the plot start (this is rate of plot creation) or + with respect to the plot end (this is rate of plot completion). + Parameters: + ax: a matplotlib axis. + data: numpy arrary with [start times, end times]. + end: T/F, compute plot creation or plot completion rate. + window: Window to compute rate over. + ''' + + def estimate_rate(data, window): + rate_list = [] + window_list = [] + # This takes care of when we dont have a full window + for i in range(window): + rate_list.append(data[i] - data[0]) + window_list.append(i) + # This takes care of when we do + for i in range(len(data) - window): + rate_list.append(data[i+window] - data[i]) + window_list.append(window) + rate_list, window_list = np.array(rate_list), np.array(window_list) + rate_list[rate_list == 0] = np.nan # This prevents div by zero error + return np.where(np.logical_not(np.isnan(rate_list)), (window_list-1) / rate_list, 0) + + # Estimate the rate of ending or the rate of starting + if end: + rate = estimate_rate(data[:,1], window) + ax.plot(data[:,1], rate) + else: + rate = estimate_rate(data[:,0], window) + ax.plot(data[:,0], rate) + + ax.set_ylabel('Avg Plot Rate (plots/hour)') + ax.set_xlim(np.min(data[:,0])-2, np.max(data[:,1])+2) + + +def create_ax_plottime(ax, data, window=3): + ''' + Create a plot showing the average time to create a single plot. This is + computed using a moving average. Note that the plot may not be + very accurate for the beginning and ending windows. + Parameters: + ax: a matplotlib axis. + data: numpy arrary with [start times, end times]. + window: Window to compute rate over. + ''' + + # Compute moving avg + kernel = np.ones(window) / window + data_tiled = np.vstack(( + np.expand_dims(data[:,1] - data[:,0], axis=1), + np.tile(data[-1,1] - data[-1,0], (window-1, 1)) + )) + rolling_avg = np.convolve(data_tiled.squeeze(), kernel, mode='valid') + + ax.plot(data[:,1], rolling_avg) + + ax.set_ylabel('Avg Plot Time (hours)') + ax.set_xlim(np.min(data[:,0])-2, np.max(data[:,1])+2) + + +def create_ax_plotcumulative(ax, data): + ''' + Create a plot showing the cumulative number of plots over time. + Parameters: + ax: a matplotlib axis. + data: numpy arrary with [start times, end times]. + ''' + ax.plot(data[:,1], range(data.shape[0])) + + ax.set_ylabel('Total plots (plots)') + ax.set_xlim(np.min(data[:,0])-2, np.max(data[:,1])+2) + + +def graph(logfilenames, figfile, bytmp, bybitfield): + data = {} + logfilenames = [os.path.join(os.path.dirname(logfilenames), l) for l in os.listdir(logfilenames) if + os.path.splitext(l)[-1] == '.log'] + + for logfilename in logfilenames: + with open(logfilename, 'r') as f: + # Record of slicing and data associated with the slice + sl = 'x' # Slice key + phase_time = {} # Map from phase index to time + n_sorts = 0 + n_uniform = 0 + is_first_last = False + + # Read the logfile, triggering various behaviors on various + # regex matches. + for line in f: + # Beginning of plot job. We may encounter this multiple + # times, if a job was run with -n > 1. Sample log line: + # 2021-04-08T13:33:43.542 chia.plotting.create_plots : INFO Starting plot 1/5 + m = re.search(r'Starting plot (\d*)/(\d*)', line) + if m: + # (re)-initialize data structures + sl = 'x' # Slice key + phase_time = {} # Map from phase index to time + n_sorts = 0 + n_uniform = 0 + + seq_num = int(m.group(1)) + seq_total = int(m.group(2)) + is_first_last = seq_num == 1 or seq_num == seq_total + + # Temp dirs. 
Sample log line: + # Starting plotting progress into temporary dirs: /mnt/tmp/01 and /mnt/tmp/a + m = re.search(r'^Starting plotting.*dirs: (.*) and (.*)', line) + if m: + # Record tmpdir, if slicing by it + if bytmp: + tmpdir = m.group(1) + sl += '-' + tmpdir + + # Bitfield marker. Sample log line(s): + # Starting phase 2/4: Backpropagation without bitfield into tmp files... Mon Mar 1 03:56:11 2021 + # or + # Starting phase 2/4: Backpropagation into tmp files... Fri Apr 2 03:17:32 2021 + m = re.search(r'^Starting phase 2/4: Backpropagation', line) + if bybitfield and m: + if 'without bitfield' in line: + sl += '-nobitfield' + else: + sl += '-bitfield' + + # Phase timing. Sample log line: + # Time for phase 1 = 22796.7 seconds. CPU (98%) Tue Sep 29 17:57:19 2020 + for phase in ['1', '2', '3', '4']: + m = re.search(r'^Time for phase ' + phase + ' = (\d+.\d+) seconds..*', line) + if m: + phase_time[phase] = float(m.group(1)) + + # Uniform sort. Sample log line: + # Bucket 267 uniform sort. Ram: 0.920GiB, u_sort min: 0.688GiB, qs min: 0.172GiB. + # or + # ....?.... + # or + # Bucket 511 QS. Ram: 0.920GiB, u_sort min: 0.375GiB, qs min: 0.094GiB. force_qs: 1 + m = re.search(r'Bucket \d+ ([^\.]+)\..*', line) + if m and not 'force_qs' in line: + sorter = m.group(1) + n_sorts += 1 + if sorter == 'uniform sort': + n_uniform += 1 + elif sorter == 'QS': + pass + else: + print ('Warning: unrecognized sort ' + sorter) + + # Job completion. Record total time in sliced data store. + # Sample log line: + # Total time = 49487.1 seconds. CPU (97.26%) Wed Sep 30 01:22:10 2020 + m = re.search(r'^Total time = (\d+.\d+) seconds.', line) + if m: + time_taken = float(m.group(1)) + data.setdefault(sl, {}).setdefault('total time', []).append(time_taken) + for phase in ['1', '2', '3', '4']: + data.setdefault(sl, {}).setdefault('phase ' + phase, []).append(phase_time[phase]) + data.setdefault(sl, {}).setdefault('%usort', []).append(100 * n_uniform // n_sorts) + + time_ended = time.mktime(datetime.datetime.strptime(line.split(')')[-1][1:-1], '%a %b %d %H:%M:%S %Y').timetuple()) + data.setdefault(sl, {}).setdefault('time ended', []).append(time_ended) + data.setdefault(sl, {}).setdefault('time started', []).append(time_ended - time_taken) + + # Prepare report + for sl in data.keys(): + + # This array will hold start and end data (in hours) + data_started_ended = np.array([[ts, te, te-ts] for + ts, te in zip(data[sl]['time started'], data[sl]['time ended']) + ]) / (60 * 60) + + # Sift the data so that it starts at zero + data_started_ended -= np.min(data_started_ended[:, 0]) + + # Sort the rows by start time + data_started_ended = data_started_ended[np.argsort(data_started_ended[:, 0])] + + # Create figure + num_plots = 4 + f, _ = plt.subplots(2,1, figsize=(8, 12)) + ax = plt.subplot(num_plots,1,1) + ax.set_title('Plot performance summary') + + create_ax_dumbbell(ax, data_started_ended) + + ax = plt.subplot(num_plots,1,2) + create_ax_plotrate(ax, data_started_ended, end=True, window=3) + + ax = plt.subplot(num_plots,1,3) + create_ax_plottime(ax, data_started_ended, window=3) + + ax = plt.subplot(num_plots,1,4) + create_ax_plotcumulative(ax, data_started_ended) + + ax.set_xlabel('Time (hours)') + f.savefig(figfile) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='') + parser.add_argument( + 'log_dir', + help='directory containing logs to analyze.') + parser.add_argument( + '--bytmp', + action='store_true', + help='slice by tmp dirs') + parser.add_argument( + '--bybitfield', + 
action='store_true', + help='slice by bitfield/non-bitfield sorting') + args = parser.parse_args() + + analyze(args.log_dir, args.bytmp, args.bybitfield)n \ No newline at end of file diff --git a/src/plotman/plotman.py b/src/plotman/plotman.py index a63b79f7..9e28fb81 100644 --- a/src/plotman/plotman.py +++ b/src/plotman/plotman.py @@ -101,6 +101,18 @@ def parse_args(self) -> typing.Any: p_analyze.add_argument('--figfile', type=str, default=None, help='figure to be created if logdir is passed') + p_graph = sp.add_parser('graph', help='create graph with plotting statistics') + p_graph.add_argument('--logdir', type=str, default=None, + help='directory containing multiple logfiles to analyze') + p_graph.add_argument('--figfile', type=str, default=None, + help='graph file produced as output (.png, .jpg, etc.)') + p_graph.add_argument('--bytmp', + action='store_true', + help='slice by tmp dirs') + p_graph.add_argument('--bybitfield', + action='store_true', + help='slice by bitfield/non-bitfield sorting') + args = parser.parse_args() return args @@ -207,6 +219,12 @@ def main() -> None: analyzer.analyze(args.logfile, args.clipterminals, args.bytmp, args.bybitfield) + # + # Graphing of completed jobs + # + elif args.cmd == 'graph': + graph.graph(args.logfile, args.figfile, args.bytmp, args.bybitfield) + # # Exports log metadata to CSV # From 95e1c635f66612f0dac40dd27a7edc8bb82b3842 Mon Sep 17 00:00:00 2001 From: nikwl Date: Sat, 7 Aug 2021 14:53:21 -0400 Subject: [PATCH 16/24] several fixes, added some cli arguments, should work now --- src/plotman/analyzer.py | 50 +--------- src/plotman/graph.py | 199 ++++++++++++---------------------------- src/plotman/plotman.py | 20 ++-- 3 files changed, 70 insertions(+), 199 deletions(-) diff --git a/src/plotman/analyzer.py b/src/plotman/analyzer.py index 3fa63902..8b1e49df 100644 --- a/src/plotman/analyzer.py +++ b/src/plotman/analyzer.py @@ -5,10 +5,6 @@ import typing import texttable as tt -import numpy as np - -import matplotlib -import matplotlib.pyplot as plt from plotman import plot_util @@ -114,48 +110,6 @@ def analyze(logfilenames: typing.List[str], clipterminals: bool, bytmp: bool, by data.setdefault(sl, {}).setdefault('phase ' + phase, []).append(phase_time[phase]) data.setdefault(sl, {}).setdefault('%usort', []).append(0) # Not available for MADMAX - # Grab the time ended, compute the time started - time_ended = time.mktime(datetime.datetime.strptime(line.split(')')[-1][1:-1], '%a %b %d %H:%M:%S %Y').timetuple()) - data.setdefault(sl, {}).setdefault('time ended', []).append(time_ended) - data.setdefault(sl, {}).setdefault('time started', []).append(time_ended - float(m.group(1))) - - if figfile is not None: - # Prepare report - for sl in data.keys(): - - # This array will hold start and end data (in hours) - data_started_ended = np.array([[ts, te, te-ts] for - ts, te in zip(data[sl]['time started'], data[sl]['time ended']) - ]) / (60 * 60) - assert data_started_ended.shape[0] >= 3, 'Cannot generate figure with less than 3 datapoints ({} datapoints passed)'.format(data_started_ended.shape[0]) - - # Sift the data so that it starts at zero - data_started_ended -= np.min(data_started_ended[:, 0]) - - # Sort the rows by start time - data_started_ended = data_started_ended[np.argsort(data_started_ended[:, 0])] - - # Create figure - num_plots = 4 - f, _ = plt.subplots(2,1, figsize=(8, 12)) - ax = plt.subplot(num_plots,1,1) - ax.set_title('Plot performance summary') - - create_ax_dumbbell(ax, data_started_ended) - - ax = plt.subplot(num_plots,1,2) - 
create_ax_plotrate(ax, data_started_ended, end=True, window=3) - - ax = plt.subplot(num_plots,1,3) - create_ax_plottime(ax, data_started_ended, window=3) - - ax = plt.subplot(num_plots,1,4) - create_ax_plotcumulative(ax, data_started_ended) - - print('Saving analysis figure to {}'.format(figfile)) - ax.set_xlabel('Time (hours)') - f.savefig(figfile) - # Prepare report tab = tt.Texttable() all_measures = ['%usort', 'phase 1', 'phase 2', 'phase 3', 'phase 4', 'total time'] @@ -195,6 +149,4 @@ def analyze(logfilenames: typing.List[str], clipterminals: bool, bytmp: bool, by (rows, columns) = os.popen('stty size', 'r').read().split() tab.set_max_width(int(columns)) s = tab.draw() - print(s) - - + print(s) \ No newline at end of file diff --git a/src/plotman/graph.py b/src/plotman/graph.py index 8ff7f301..aa740a76 100644 --- a/src/plotman/graph.py +++ b/src/plotman/graph.py @@ -10,8 +10,10 @@ import matplotlib import matplotlib.pyplot as plt +from plotman.log_parser import PlotLogParser -def create_ax_dumbbell(ax, data, max_stacked=50): + +def create_ax_dumbbell(ax, data, max_stacked=50) -> None: ''' Create a dumbbell plot of concurrent plot instances over time. Parameters: @@ -42,7 +44,7 @@ def newline(p1, p2, color='r'): ax.set_xlim(np.min(data[:,0])-2, np.max(data[:,1])+2) -def create_ax_plotrate(ax, data, end=True, window=3): +def create_ax_plotrate(ax, data, end=True, window=3) -> None: ''' Create a plot showing the rate of plotting over time. Can be computed with respect to the plot start (this is rate of plot creation) or @@ -54,7 +56,7 @@ def create_ax_plotrate(ax, data, end=True, window=3): window: Window to compute rate over. ''' - def estimate_rate(data, window): + def estimate_rate(data, window): rate_list = [] window_list = [] # This takes care of when we dont have a full window @@ -81,7 +83,7 @@ def estimate_rate(data, window): ax.set_xlim(np.min(data[:,0])-2, np.max(data[:,1])+2) -def create_ax_plottime(ax, data, window=3): +def create_ax_plottime(ax, data, window=3) -> None: ''' Create a plot showing the average time to create a single plot. This is computed using a moving average. Note that the plot may not be @@ -106,7 +108,7 @@ def create_ax_plottime(ax, data, window=3): ax.set_xlim(np.min(data[:,0])-2, np.max(data[:,1])+2) -def create_ax_plotcumulative(ax, data): +def create_ax_plotcumulative(ax, data) -> None: ''' Create a plot showing the cumulative number of plots over time. 
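The moving-average construction in create_ax_plottime, just above, deserves a standalone illustration: padding the durations with window-1 copies of the final value lets np.convolve(..., mode='valid') return exactly one smoothed value per plot instead of dropping the last window-1 entries. A toy check (illustrative only, with made-up durations):

    # Toy demonstration of the padded moving average used above.
    import numpy as np

    durations = np.array([2.0, 4.0, 6.0, 8.0])  # hours per plot (made up)
    window = 3
    kernel = np.ones(window) / window
    padded = np.concatenate([durations, np.tile(durations[-1], window - 1)])
    rolling = np.convolve(padded, kernel, mode="valid")
    assert rolling.shape == durations.shape     # one smoothed value per plot
    print(rolling)                              # [4.0, 6.0, ~7.33, 8.0]
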
Parameters: @@ -119,145 +121,64 @@ def create_ax_plotcumulative(ax, data): ax.set_xlim(np.min(data[:,0])-2, np.max(data[:,1])+2) -def graph(logfilenames, figfile, bytmp, bybitfield): - data = {} - logfilenames = [os.path.join(os.path.dirname(logfilenames), l) for l in os.listdir(logfilenames) if +def graph(logdir : str, figfile : str, latest_k : int, window : int) -> None: + assert window >= 2, "Cannot compute moving average over such a small window" + assert os.path.isdir(logdir) + + # Build a list of the logfiles + logdir = os.path.abspath(logdir) + logfilenames = [os.path.join(logdir, l) for l in os.listdir(logdir) if os.path.splitext(l)[-1] == '.log'] + assert len(logfilenames) > 0, "Directory contains no files {}".format(logdir) + + # For each log file, extract the start, end, and duration + time_catter = [] + parser = PlotLogParser() for logfilename in logfilenames: with open(logfilename, 'r') as f: - # Record of slicing and data associated with the slice - sl = 'x' # Slice key - phase_time = {} # Map from phase index to time - n_sorts = 0 - n_uniform = 0 - is_first_last = False - - # Read the logfile, triggering various behaviors on various - # regex matches. - for line in f: - # Beginning of plot job. We may encounter this multiple - # times, if a job was run with -n > 1. Sample log line: - # 2021-04-08T13:33:43.542 chia.plotting.create_plots : INFO Starting plot 1/5 - m = re.search(r'Starting plot (\d*)/(\d*)', line) - if m: - # (re)-initialize data structures - sl = 'x' # Slice key - phase_time = {} # Map from phase index to time - n_sorts = 0 - n_uniform = 0 - - seq_num = int(m.group(1)) - seq_total = int(m.group(2)) - is_first_last = seq_num == 1 or seq_num == seq_total - - # Temp dirs. Sample log line: - # Starting plotting progress into temporary dirs: /mnt/tmp/01 and /mnt/tmp/a - m = re.search(r'^Starting plotting.*dirs: (.*) and (.*)', line) - if m: - # Record tmpdir, if slicing by it - if bytmp: - tmpdir = m.group(1) - sl += '-' + tmpdir - - # Bitfield marker. Sample log line(s): - # Starting phase 2/4: Backpropagation without bitfield into tmp files... Mon Mar 1 03:56:11 2021 - # or - # Starting phase 2/4: Backpropagation into tmp files... Fri Apr 2 03:17:32 2021 - m = re.search(r'^Starting phase 2/4: Backpropagation', line) - if bybitfield and m: - if 'without bitfield' in line: - sl += '-nobitfield' - else: - sl += '-bitfield' - - # Phase timing. Sample log line: - # Time for phase 1 = 22796.7 seconds. CPU (98%) Tue Sep 29 17:57:19 2020 - for phase in ['1', '2', '3', '4']: - m = re.search(r'^Time for phase ' + phase + ' = (\d+.\d+) seconds..*', line) - if m: - phase_time[phase] = float(m.group(1)) - - # Uniform sort. Sample log line: - # Bucket 267 uniform sort. Ram: 0.920GiB, u_sort min: 0.688GiB, qs min: 0.172GiB. - # or - # ....?.... - # or - # Bucket 511 QS. Ram: 0.920GiB, u_sort min: 0.375GiB, qs min: 0.094GiB. force_qs: 1 - m = re.search(r'Bucket \d+ ([^\.]+)\..*', line) - if m and not 'force_qs' in line: - sorter = m.group(1) - n_sorts += 1 - if sorter == 'uniform sort': - n_uniform += 1 - elif sorter == 'QS': - pass - else: - print ('Warning: unrecognized sort ' + sorter) - - # Job completion. Record total time in sliced data store. - # Sample log line: - # Total time = 49487.1 seconds. 
CPU (97.26%) Wed Sep 30 01:22:10 2020 - m = re.search(r'^Total time = (\d+.\d+) seconds.', line) - if m: - time_taken = float(m.group(1)) - data.setdefault(sl, {}).setdefault('total time', []).append(time_taken) - for phase in ['1', '2', '3', '4']: - data.setdefault(sl, {}).setdefault('phase ' + phase, []).append(phase_time[phase]) - data.setdefault(sl, {}).setdefault('%usort', []).append(100 * n_uniform // n_sorts) - - time_ended = time.mktime(datetime.datetime.strptime(line.split(')')[-1][1:-1], '%a %b %d %H:%M:%S %Y').timetuple()) - data.setdefault(sl, {}).setdefault('time ended', []).append(time_ended) - data.setdefault(sl, {}).setdefault('time started', []).append(time_ended - time_taken) - - # Prepare report - for sl in data.keys(): - - # This array will hold start and end data (in hours) - data_started_ended = np.array([[ts, te, te-ts] for - ts, te in zip(data[sl]['time started'], data[sl]['time ended']) - ]) / (60 * 60) - - # Sift the data so that it starts at zero - data_started_ended -= np.min(data_started_ended[:, 0]) - - # Sort the rows by start time - data_started_ended = data_started_ended[np.argsort(data_started_ended[:, 0])] - - # Create figure - num_plots = 4 - f, _ = plt.subplots(2,1, figsize=(8, 12)) - ax = plt.subplot(num_plots,1,1) - ax.set_title('Plot performance summary') - - create_ax_dumbbell(ax, data_started_ended) + info = parser.parse(f) + if info.total_time_raw != 0: + time_catter.append( + [ + info.started_at.timestamp(), + info.started_at.timestamp() + info.total_time_raw, + info.total_time_raw + ] + ) + + assert len(time_catter) > 0, "No valid log files found, need a finished plot" + + # This array will hold start and end data (in hours) + data_started_ended = np.array(time_catter) / (60 * 60) + + # Shift the data so that it starts at zero + data_started_ended -= np.min(data_started_ended[:, 0]) + # Sort the rows by start time + data_started_ended = data_started_ended[np.argsort(data_started_ended[:, 0])] + + # Remove older entries + if latest_k is not None: + data_started_ended = data_started_ended[-latest_k:, :] + + # Create figure + num_plots = 4 + f, _ = plt.subplots(2,1, figsize=(8, 10)) + ax = plt.subplot(num_plots,1,1) + ax.set_title('Plot performance summary') + + create_ax_dumbbell(ax, data_started_ended) + + if data_started_ended.shape[0] > window: ax = plt.subplot(num_plots,1,2) - create_ax_plotrate(ax, data_started_ended, end=True, window=3) + create_ax_plotrate(ax, data_started_ended, end=True, window=window) ax = plt.subplot(num_plots,1,3) - create_ax_plottime(ax, data_started_ended, window=3) - - ax = plt.subplot(num_plots,1,4) - create_ax_plotcumulative(ax, data_started_ended) - - ax.set_xlabel('Time (hours)') - f.savefig(figfile) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser(description='') - parser.add_argument( - 'log_dir', - help='directory containing logs to analyze.') - parser.add_argument( - '--bytmp', - action='store_true', - help='slice by tmp dirs') - parser.add_argument( - '--bybitfield', - action='store_true', - help='slice by bitfield/non-bitfield sorting') - args = parser.parse_args() - - analyze(args.log_dir, args.bytmp, args.bybitfield)n \ No newline at end of file + create_ax_plottime(ax, data_started_ended, window=window) + + ax = plt.subplot(num_plots,1,4) + create_ax_plotcumulative(ax, data_started_ended) + + ax.set_xlabel('Time (hours)') + f.savefig(figfile) \ No newline at end of file diff --git a/src/plotman/plotman.py b/src/plotman/plotman.py index 389b8ea5..6fe3145c 100644 --- 
a/src/plotman/plotman.py +++ b/src/plotman/plotman.py @@ -15,7 +15,7 @@ import pendulum # Plotman libraries -from plotman import analyzer, archive, configuration, interactive, manager, plot_util, reporting, csv_exporter +from plotman import analyzer, archive, configuration, interactive, manager, plot_util, reporting, csv_exporter, graph from plotman import resources as plotman_resources from plotman.job import Job @@ -103,16 +103,14 @@ def parse_args(self) -> typing.Any: help='figure to be created if logdir is passed') p_graph = sp.add_parser('graph', help='create graph with plotting statistics') - p_graph.add_argument('--logdir', type=str, default=None, - help='directory containing multiple logfiles to analyze') - p_graph.add_argument('--figfile', type=str, default=None, + p_graph.add_argument('logdir', type=str, + help='directory containing multiple logfiles to graph') + p_graph.add_argument('figfile', type=str, help='graph file produced as output (.png, .jpg, etc.)') - p_graph.add_argument('--bytmp', - action='store_true', - help='slice by tmp dirs') - p_graph.add_argument('--bybitfield', - action='store_true', - help='slice by bitfield/non-bitfield sorting') + p_graph.add_argument('--latest_k', type=int, default=None, + help='if passed, will only graph statistics for the latest k plots') + p_graph.add_argument('--window', type=int, default=3, + help='window size to compute moving average over') args = parser.parse_args() return args @@ -224,7 +222,7 @@ def main() -> None: # Graphing of completed jobs # elif args.cmd == 'graph': - graph.graph(args.logfile, args.figfile, args.bytmp, args.bybitfield) + graph.graph(args.logdir, args.figfile, args.latest_k, args.window) # # Exports log metadata to CSV From 46260ad33956c7c8603a98aca24740a8834292eb Mon Sep 17 00:00:00 2001 From: nikwl Date: Sat, 28 Aug 2021 20:26:25 -0400 Subject: [PATCH 17/24] Fixed several discontinuities that I think were caused by the previous merge. Current state should exactly reflect ericaltendorf/plotman except in plotman.py and graph.py. Also added a changelog entry --- CHANGELOG.md | 4 +++ src/plotman/archive.py | 12 +++---- src/plotman/configuration.py | 10 ++++-- src/plotman/interactive.py | 12 +++++-- src/plotman/job.py | 48 ++++++++++++++++------------ src/plotman/plotman.py | 62 ++++++++++++++++++++++++------------ 6 files changed, 96 insertions(+), 52 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 75f96622..dcdaa291 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ([#885](https://github.com/ericaltendorf/plotman/pull/885)) - `supervisord` now used in Docker image. ([#898](https://github.com/ericaltendorf/plotman/pull/898)) +- `plotman graph` command to create a matplotlib plot for completed + plots ([#612](https://github.com/ericaltendorf/plotman/pull/612)). + Creates a graph image showing plots over time, average plot rage, + average plot time, and total number of plots over time. 
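As of this point in the series the feature is exercised as `plotman graph <logdir> <figfile> [--latest_k K] [--window W]`, per the argparse hunk above. The programmatic equivalent is a single call; the paths below are placeholders:

    # Placeholder paths; mirrors the signature at this point in the
    # series: graph(logdir, figfile, latest_k, window).
    from plotman import graph

    graph.graph("/home/chia/chia/logs", "plot_summary.png", latest_k=None, window=3)
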
## [0.5.1] - 2021-07-15 ### Fixed diff --git a/src/plotman/archive.py b/src/plotman/archive.py index 136f2bab..0101651a 100644 --- a/src/plotman/archive.py +++ b/src/plotman/archive.py @@ -18,7 +18,7 @@ from plotman import configuration, job, manager, plot_util -logger = logging.getLogger(__name__) +disk_space_logger = logging.getLogger("disk_space") _WINDOWS = sys.platform == 'win32' @@ -164,15 +164,15 @@ def get_archdir_freebytes(arch_cfg: configuration.Archiving) -> typing.Tuple[typ archdir_freebytes[archdir.strip()] = freebytes for line in log_messages: - logger.info(line) + disk_space_logger.info(line) - logger.info('stdout from disk space script:') + disk_space_logger.info('stdout from disk space script:') for line in stdout.splitlines(): - logger.info(f' {line}') + disk_space_logger.info(f' {line}') - logger.info('stderr from disk space script:') + disk_space_logger.info('stderr from disk space script:') for line in stderr.splitlines(): - logger.info(f' {line}') + disk_space_logger.info(f' {line}') return archdir_freebytes, log_messages diff --git a/src/plotman/configuration.py b/src/plotman/configuration.py index 77c62f90..b0f4a178 100644 --- a/src/plotman/configuration.py +++ b/src/plotman/configuration.py @@ -45,7 +45,6 @@ def read_configuration_text(config_path: str) -> str: def get_validated_configs(config_text: str, config_path: str, preset_target_definitions_text: str) -> "PlotmanConfig": """Return a validated instance of PlotmanConfig with data from plotman.yaml - :raises ConfigurationException: Raised when plotman.yaml is either missing or malformed """ schema = desert.schema(PlotmanConfig) @@ -268,7 +267,8 @@ class Logging: plots: str = os.path.join(appdirs.user_data_dir("plotman"), 'plots') transfers: str = os.path.join(appdirs.user_data_dir("plotman"), 'transfers') application: str = os.path.join(appdirs.user_log_dir("plotman"), 'plotman.log') - + disk_spaces: str = os.path.join(appdirs.user_log_dir("plotman"), 'plotman-disk_spaces.log') + def setup(self) -> None: os.makedirs(self.plots, exist_ok=True) os.makedirs(self.transfers, exist_ok=True) @@ -287,6 +287,12 @@ def create_transfer_log_path(self, time: pendulum.DateTime) -> str: directory=self.transfers, group='transfer', ) + def create_tdisk_space_log_path(self, time: pendulum.DateTime) -> str: + return self._create_log_path( + time=time, + directory=self.disk_spaces, + group='disk_space', + ) def _create_log_path(self, time: pendulum.DateTime, directory: str, group: str) -> str: timestamp = time.isoformat(timespec='microseconds').replace(':', '_') diff --git a/src/plotman/interactive.py b/src/plotman/interactive.py index 82ea89a8..70ba7c56 100644 --- a/src/plotman/interactive.py +++ b/src/plotman/interactive.py @@ -6,11 +6,12 @@ import subprocess import sys import typing +import logging from plotman import archive, configuration, manager, reporting from plotman.job import Job - +root_logger = logging.getLogger() class TerminalTooSmallError(Exception): pass @@ -141,12 +142,17 @@ def curses_main(stdscr: typing.Any, cmd_autostart_plotting: typing.Optional[bool if msg.find("stagger") < 0: aging_reason = msg plotting_status = msg + root_logger.info('[plot] %s', msg) if cfg.archiving is not None: if archiving_active: archiving_status, log_messages = archive.spawn_archive_process(cfg.directories, cfg.archiving, cfg.logging, jobs) - for log_message in log_messages: - log.log(log_message) + if log_messages: + for log_message in log_messages: + log.log(log_message) + root_logger.info('[archive] %s', log_message) + else: + 
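The disk-space logger these hunks introduce follows a standard pattern: a named child logger with propagation disabled, so its periodic chatter stays out of plotman.log. Shown in isolation below, with a plain logging.Formatter standing in for plotman's Iso8601Formatter and a placeholder filename:

    # Standalone version of the non-propagating "disk_space" logger pattern.
    import logging
    import logging.handlers

    disk_space_logger = logging.getLogger("disk_space")
    disk_space_logger.propagate = False  # keep these records out of the root log
    handler = logging.handlers.RotatingFileHandler(
        filename="plotman-disk_spaces.log",  # placeholder path
        maxBytes=10_000_000,
        backupCount=10,
        encoding="utf-8",
    )
    handler.setFormatter(logging.Formatter("%(asctime)s: %(message)s"))
    disk_space_logger.addHandler(handler)
    disk_space_logger.setLevel(logging.INFO)
    disk_space_logger.info("Start disk space logger")
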
root_logger.info('[archive] %s', archiving_status) archdir_freebytes, log_messages = archive.get_archdir_freebytes(cfg.archiving) for log_message in log_messages: diff --git a/src/plotman/job.py b/src/plotman/job.py index 2ba90a8c..ff351051 100644 --- a/src/plotman/job.py +++ b/src/plotman/job.py @@ -189,12 +189,17 @@ def get_running_jobs( with contextlib.ExitStack() as exit_stack: processes = [] + pids = set() + ppids = set() + for process in psutil.process_iter(): # Ignore processes which most likely have terminated between the time of # iteration and data access. with contextlib.suppress(psutil.NoSuchProcess, psutil.AccessDenied): exit_stack.enter_context(process.oneshot()) if is_plotting_cmdline(process.cmdline()): + ppids.add(process.ppid()) + pids.add(process.pid) processes.append(process) # https://github.com/ericaltendorf/plotman/pull/418 @@ -204,8 +209,6 @@ def get_running_jobs( # both identified as plot processes. Only the child is # really plotting. Filter out the parent. - pids = {process.pid for process in processes} - ppids = {process.ppid() for process in processes} wanted_pids = pids - ppids wanted_processes = [ @@ -215,23 +218,28 @@ def get_running_jobs( ] for proc in wanted_processes: - if proc.pid in cached_jobs_by_pid.keys(): - jobs.append(cached_jobs_by_pid[proc.pid]) # Copy from cache - else: - with proc.oneshot(): - parsed_command = parse_chia_plots_create_command_line( - command_line=proc.cmdline(), - ) - if parsed_command.error is not None: - continue - job = Job( - proc=proc, - parsed_command=parsed_command, - logroot=logroot, - ) - if job.help: - continue - jobs.append(job) + with contextlib.suppress(psutil.NoSuchProcess, psutil.AccessDenied): + if proc.pid in cached_jobs_by_pid.keys(): + jobs.append(cached_jobs_by_pid[proc.pid]) # Copy from cache + else: + with proc.oneshot(): + command_line = list(proc.cmdline()) + if len(command_line) == 0: + # https://github.com/ericaltendorf/plotman/issues/610 + continue + parsed_command = parse_chia_plots_create_command_line( + command_line=command_line, + ) + if parsed_command.error is not None: + continue + job = cls( + proc=proc, + parsed_command=parsed_command, + logroot=logroot, + ) + if job.help: + continue + jobs.append(job) return jobs @@ -578,4 +586,4 @@ def cancel(self) -> None: # complete if the job is supsended, so we also need to resume it. # TODO: check that this is best practice for killing a job. 
self.proc.resume() - self.proc.terminate() + self.proc.terminate() \ No newline at end of file diff --git a/src/plotman/plotman.py b/src/plotman/plotman.py index 6fe3145c..072c55a9 100644 --- a/src/plotman/plotman.py +++ b/src/plotman/plotman.py @@ -95,12 +95,8 @@ def parse_args(self) -> typing.Any: p_analyze.add_argument('--bybitfield', action='store_true', help='slice by bitfield/non-bitfield sorting') - p_analyze.add_argument('--logfile', type=str, nargs='+', default=None, + p_analyze.add_argument('logfile', type=str, nargs='+', help='logfile(s) to analyze') - p_analyze.add_argument('--logdir', type=str, default=None, - help='directory containing multiple logfiles to analyze') - p_analyze.add_argument('--figfile', type=str, default=None, - help='figure to be created if logdir is passed') p_graph = sp.add_parser('graph', help='create graph with plotting statistics') p_graph.add_argument('logdir', type=str, @@ -184,17 +180,31 @@ def main() -> None: with cfg.setup(): root_logger = logging.getLogger() - handler = logging.handlers.RotatingFileHandler( + root_handler = logging.handlers.RotatingFileHandler( backupCount=10, encoding='utf-8', filename=cfg.logging.application, maxBytes=10_000_000, ) - formatter = Iso8601Formatter(fmt='%(asctime)s: %(message)s') - handler.setFormatter(formatter) - root_logger.addHandler(handler) + root_formatter = Iso8601Formatter(fmt='%(asctime)s: %(message)s') + root_handler.setFormatter(root_formatter) + root_logger.addHandler(root_handler) root_logger.setLevel(logging.INFO) - root_logger.info('abc') + root_logger.info('Start root logger') + + disk_space_logger = logging.getLogger("disk_space") + disk_space_logger.propagate = False + disk_space_handler = logging.handlers.RotatingFileHandler( + backupCount=10, + encoding='utf-8', + filename=cfg.logging.disk_spaces, + maxBytes=10_000_000, + ) + disk_space_formatter = Iso8601Formatter(fmt='%(asctime)s: %(message)s') + disk_space_handler.setFormatter(disk_space_formatter) + disk_space_logger.addHandler(disk_space_handler) + disk_space_logger.setLevel(logging.INFO) + disk_space_logger.info('Start disk space logger') # # Stay alive, spawning plot jobs @@ -202,11 +212,14 @@ def main() -> None: if args.cmd == 'plot': print('...starting plot loop') while True: - wait_reason = manager.maybe_start_new_plot(cfg.directories, cfg.scheduling, cfg.plotting, cfg.logging) + (started, msg) = manager.maybe_start_new_plot(cfg.directories, cfg.scheduling, cfg.plotting, cfg.logging) # TODO: report this via a channel that can be polled on demand, so we don't spam the console - if wait_reason: - print('...sleeping %d s: %s' % (cfg.scheduling.polling_time_s, wait_reason)) + if started: + print('%s' % (msg)) + else: + print('...sleeping %d s: %s' % (cfg.scheduling.polling_time_s, msg)) + root_logger.info('[plot] %s', msg) time.sleep(cfg.scheduling.polling_time_s) @@ -269,21 +282,28 @@ def main() -> None: # Start running archival elif args.cmd == 'archive': if cfg.archiving is None: - print('archiving not configured but is required for this command') + start_msg = 'archiving not configured but is required for this command' + print(start_msg) + root_logger.info('[archive] %s', start_msg) else: - print('...starting archive loop') + start_msg = '...starting archive loop' + print(start_msg) + root_logger.info('[archive] %s', start_msg) firstit = True while True: if not firstit: - print('Sleeping 60s until next iteration...') - time.sleep(60) + print('Sleeping %d s until next iteration...' 
% (cfg.scheduling.polling_time_s)) + time.sleep(cfg.scheduling.polling_time_s) jobs = Job.get_running_jobs(cfg.logging.plots) firstit = False archiving_status, log_messages = archive.spawn_archive_process(cfg.directories, cfg.archiving, cfg.logging, jobs) - for log_message in log_messages: - print(log_message) - + if log_messages: + for log_message in log_messages: + print(log_message) + root_logger.info('[archive] %s', log_message) + else: + root_logger.info('[archive] %s', archiving_status) # Debugging: show the destination drive usage schedule elif args.cmd == 'dsched': @@ -356,4 +376,4 @@ def main() -> None: job.suspend() elif args.cmd == 'resume': print('Resuming ' + job.plot_id) - job.resume() + job.resume() \ No newline at end of file From a6c65ed134feca85743a315fe35f04f092eac78d Mon Sep 17 00:00:00 2001 From: nikwl Date: Sat, 28 Aug 2021 20:27:19 -0400 Subject: [PATCH 18/24] logdir is no longer required, instead it pull from the logdir defined in the config file --- src/plotman/plotman.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/plotman/plotman.py b/src/plotman/plotman.py index 072c55a9..49b37583 100644 --- a/src/plotman/plotman.py +++ b/src/plotman/plotman.py @@ -99,10 +99,10 @@ def parse_args(self) -> typing.Any: help='logfile(s) to analyze') p_graph = sp.add_parser('graph', help='create graph with plotting statistics') - p_graph.add_argument('logdir', type=str, - help='directory containing multiple logfiles to graph') p_graph.add_argument('figfile', type=str, help='graph file produced as output (.png, .jpg, etc.)') + p_graph.add_argument('--logdir', type=str, default=None, + help='directory containing multiple logfiles to graph') p_graph.add_argument('--latest_k', type=int, default=None, help='if passed, will only graph statistics for the latest k plots') p_graph.add_argument('--window', type=int, default=3, @@ -235,6 +235,9 @@ def main() -> None: # Graphing of completed jobs # elif args.cmd == 'graph': + # If no logdir was passed, use the dir specified in cfg (this will almost always be the case) + if args.logdir is None: + args.logdir = cfg.logging.plots graph.graph(args.logdir, args.figfile, args.latest_k, args.window) # From 134d4b7f2043d655faafd41735ab5712d2a91d3c Mon Sep 17 00:00:00 2001 From: nikwl Date: Sat, 28 Aug 2021 20:46:28 -0400 Subject: [PATCH 19/24] Added type annotations to functions --- src/plotman/graph.py | 22 ++++++++-------------- 1 file changed, 8 insertions(+), 14 deletions(-) diff --git a/src/plotman/graph.py b/src/plotman/graph.py index aa740a76..89e3efdb 100644 --- a/src/plotman/graph.py +++ b/src/plotman/graph.py @@ -1,19 +1,13 @@ import os -import time, datetime -import re -import statistics -import sys -import argparse import numpy as np - import matplotlib import matplotlib.pyplot as plt from plotman.log_parser import PlotLogParser -def create_ax_dumbbell(ax, data, max_stacked=50) -> None: +def create_ax_dumbbell(ax : matplotlib.pyplot.axis, data : np.array, max_stacked: int = 50) -> None: ''' Create a dumbbell plot of concurrent plot instances over time. Parameters: @@ -21,8 +15,8 @@ def create_ax_dumbbell(ax, data, max_stacked=50) -> None: data: numpy arrary with [start times, end times]. 
''' - def newline(p1, p2, color='r'): - l = matplotlib.lines.Line2D([p1[0],p2[0]], [p1[1],p2[1]], color=color) + def newline(p1 : float, p2 : float) -> matplotlib.lines.Line2D: + l = matplotlib.lines.Line2D([p1[0],p2[0]], [p1[1],p2[1]], color='r') ax.add_line(l) return l @@ -44,7 +38,7 @@ def newline(p1, p2, color='r'): ax.set_xlim(np.min(data[:,0])-2, np.max(data[:,1])+2) -def create_ax_plotrate(ax, data, end=True, window=3) -> None: +def create_ax_plotrate(ax : matplotlib.pyplot.axis, data : np.array, end : bool = True, window : int = 3) -> None: ''' Create a plot showing the rate of plotting over time. Can be computed with respect to the plot start (this is rate of plot creation) or @@ -56,7 +50,7 @@ def create_ax_plotrate(ax, data, end=True, window=3) -> None: window: Window to compute rate over. ''' - def estimate_rate(data, window): + def estimate_rate(data : np.array, window : int) -> np.array: rate_list = [] window_list = [] # This takes care of when we dont have a full window @@ -83,7 +77,7 @@ def estimate_rate(data, window): ax.set_xlim(np.min(data[:,0])-2, np.max(data[:,1])+2) -def create_ax_plottime(ax, data, window=3) -> None: +def create_ax_plottime(ax : matplotlib.pyplot.axis, data : np.array, window : int = 3) -> None: ''' Create a plot showing the average time to create a single plot. This is computed using a moving average. Note that the plot may not be @@ -108,7 +102,7 @@ def create_ax_plottime(ax, data, window=3) -> None: ax.set_xlim(np.min(data[:,0])-2, np.max(data[:,1])+2) -def create_ax_plotcumulative(ax, data) -> None: +def create_ax_plotcumulative(ax : matplotlib.pyplot.axis, data : np.array) -> None: ''' Create a plot showing the cumulative number of plots over time. Parameters: @@ -122,7 +116,7 @@ def create_ax_plotcumulative(ax, data) -> None: def graph(logdir : str, figfile : str, latest_k : int, window : int) -> None: - assert window >= 2, "Cannot compute moving average over such a small window" + assert window >= 2, "Cannot compute moving average over a window less than 3" assert os.path.isdir(logdir) # Build a list of the logfiles From a8039ddd1fec25a924588b20900c358d96d72a39 Mon Sep 17 00:00:00 2001 From: Kyle Altendorf Date: Sat, 28 Aug 2021 23:27:48 -0400 Subject: [PATCH 20/24] black --- src/plotman/configuration.py | 3 +- src/plotman/graph.py | 187 +++++++++++++++++++---------------- src/plotman/interactive.py | 1 + src/plotman/job.py | 2 +- src/plotman/plotman.py | 33 +++++-- 5 files changed, 127 insertions(+), 99 deletions(-) diff --git a/src/plotman/configuration.py b/src/plotman/configuration.py index e73c3c17..c5ce1482 100644 --- a/src/plotman/configuration.py +++ b/src/plotman/configuration.py @@ -326,11 +326,12 @@ def create_tdisk_space_log_path(self, time: pendulum.DateTime) -> str: directory=self.disk_spaces, group="disk_space", ) + def create_tdisk_space_log_path(self, time: pendulum.DateTime) -> str: return self._create_log_path( time=time, directory=self.disk_spaces, - group='disk_space', + group="disk_space", ) def _create_log_path( diff --git a/src/plotman/graph.py b/src/plotman/graph.py index 89e3efdb..16d4eca9 100644 --- a/src/plotman/graph.py +++ b/src/plotman/graph.py @@ -7,16 +7,18 @@ from plotman.log_parser import PlotLogParser -def create_ax_dumbbell(ax : matplotlib.pyplot.axis, data : np.array, max_stacked: int = 50) -> None: - ''' - Create a dumbbell plot of concurrent plot instances over time. - Parameters: - ax: a matplotlib axis. - data: numpy arrary with [start times, end times]. 
- ''' - - def newline(p1 : float, p2 : float) -> matplotlib.lines.Line2D: - l = matplotlib.lines.Line2D([p1[0],p2[0]], [p1[1],p2[1]], color='r') +def create_ax_dumbbell( + ax: matplotlib.pyplot.axis, data: np.array, max_stacked: int = 50 +) -> None: + """ + Create a dumbbell plot of concurrent plot instances over time. + Parameters: + ax: a matplotlib axis. + data: numpy arrary with [start times, end times]. + """ + + def newline(p1: float, p2: float) -> matplotlib.lines.Line2D: + l = matplotlib.lines.Line2D([p1[0], p2[0]], [p1[1], p2[1]], color="r") ax.add_line(l) return l @@ -27,30 +29,32 @@ def newline(p1 : float, p2 : float) -> matplotlib.lines.Line2D: stacker.extend(list(range(max_stacked))) stacker = np.array(stacker) if num_rows % float(max_stacked) != 0: - stacker = stacker[:-(max_stacked-int(num_rows % float(max_stacked)))] + stacker = stacker[: -(max_stacked - int(num_rows % float(max_stacked)))] - for (p1, p2), i in zip(data[:,:2], stacker): + for (p1, p2), i in zip(data[:, :2], stacker): newline([p1, i], [p2, i]) - ax.scatter(data[:,0], stacker, color='b') - ax.scatter(data[:,1], stacker, color='b') - - ax.set_ylabel('Plots') - ax.set_xlim(np.min(data[:,0])-2, np.max(data[:,1])+2) - - -def create_ax_plotrate(ax : matplotlib.pyplot.axis, data : np.array, end : bool = True, window : int = 3) -> None: - ''' - Create a plot showing the rate of plotting over time. Can be computed - with respect to the plot start (this is rate of plot creation) or - with respect to the plot end (this is rate of plot completion). - Parameters: - ax: a matplotlib axis. - data: numpy arrary with [start times, end times]. - end: T/F, compute plot creation or plot completion rate. - window: Window to compute rate over. - ''' - - def estimate_rate(data : np.array, window : int) -> np.array: + ax.scatter(data[:, 0], stacker, color="b") + ax.scatter(data[:, 1], stacker, color="b") + + ax.set_ylabel("Plots") + ax.set_xlim(np.min(data[:, 0]) - 2, np.max(data[:, 1]) + 2) + + +def create_ax_plotrate( + ax: matplotlib.pyplot.axis, data: np.array, end: bool = True, window: int = 3 +) -> None: + """ + Create a plot showing the rate of plotting over time. Can be computed + with respect to the plot start (this is rate of plot creation) or + with respect to the plot end (this is rate of plot completion). + Parameters: + ax: a matplotlib axis. + data: numpy arrary with [start times, end times]. + end: T/F, compute plot creation or plot completion rate. + window: Window to compute rate over. 
+ """ + + def estimate_rate(data: np.array, window: int) -> np.array: rate_list = [] window_list = [] # This takes care of when we dont have a full window @@ -59,85 +63,94 @@ def estimate_rate(data : np.array, window : int) -> np.array: window_list.append(i) # This takes care of when we do for i in range(len(data) - window): - rate_list.append(data[i+window] - data[i]) + rate_list.append(data[i + window] - data[i]) window_list.append(window) rate_list, window_list = np.array(rate_list), np.array(window_list) - rate_list[rate_list == 0] = np.nan # This prevents div by zero error - return np.where(np.logical_not(np.isnan(rate_list)), (window_list-1) / rate_list, 0) + rate_list[rate_list == 0] = np.nan # This prevents div by zero error + return np.where( + np.logical_not(np.isnan(rate_list)), (window_list - 1) / rate_list, 0 + ) # Estimate the rate of ending or the rate of starting if end: - rate = estimate_rate(data[:,1], window) - ax.plot(data[:,1], rate) + rate = estimate_rate(data[:, 1], window) + ax.plot(data[:, 1], rate) else: - rate = estimate_rate(data[:,0], window) - ax.plot(data[:,0], rate) - - ax.set_ylabel('Avg Plot Rate (plots/hour)') - ax.set_xlim(np.min(data[:,0])-2, np.max(data[:,1])+2) - - -def create_ax_plottime(ax : matplotlib.pyplot.axis, data : np.array, window : int = 3) -> None: - ''' - Create a plot showing the average time to create a single plot. This is - computed using a moving average. Note that the plot may not be - very accurate for the beginning and ending windows. - Parameters: - ax: a matplotlib axis. - data: numpy arrary with [start times, end times]. - window: Window to compute rate over. - ''' + rate = estimate_rate(data[:, 0], window) + ax.plot(data[:, 0], rate) + + ax.set_ylabel("Avg Plot Rate (plots/hour)") + ax.set_xlim(np.min(data[:, 0]) - 2, np.max(data[:, 1]) + 2) + + +def create_ax_plottime( + ax: matplotlib.pyplot.axis, data: np.array, window: int = 3 +) -> None: + """ + Create a plot showing the average time to create a single plot. This is + computed using a moving average. Note that the plot may not be + very accurate for the beginning and ending windows. + Parameters: + ax: a matplotlib axis. + data: numpy arrary with [start times, end times]. + window: Window to compute rate over. + """ # Compute moving avg kernel = np.ones(window) / window - data_tiled = np.vstack(( - np.expand_dims(data[:,1] - data[:,0], axis=1), - np.tile(data[-1,1] - data[-1,0], (window-1, 1)) - )) - rolling_avg = np.convolve(data_tiled.squeeze(), kernel, mode='valid') + data_tiled = np.vstack( + ( + np.expand_dims(data[:, 1] - data[:, 0], axis=1), + np.tile(data[-1, 1] - data[-1, 0], (window - 1, 1)), + ) + ) + rolling_avg = np.convolve(data_tiled.squeeze(), kernel, mode="valid") - ax.plot(data[:,1], rolling_avg) + ax.plot(data[:, 1], rolling_avg) - ax.set_ylabel('Avg Plot Time (hours)') - ax.set_xlim(np.min(data[:,0])-2, np.max(data[:,1])+2) + ax.set_ylabel("Avg Plot Time (hours)") + ax.set_xlim(np.min(data[:, 0]) - 2, np.max(data[:, 1]) + 2) -def create_ax_plotcumulative(ax : matplotlib.pyplot.axis, data : np.array) -> None: - ''' - Create a plot showing the cumulative number of plots over time. - Parameters: - ax: a matplotlib axis. - data: numpy arrary with [start times, end times]. - ''' - ax.plot(data[:,1], range(data.shape[0])) +def create_ax_plotcumulative(ax: matplotlib.pyplot.axis, data: np.array) -> None: + """ + Create a plot showing the cumulative number of plots over time. + Parameters: + ax: a matplotlib axis. 
+ data: numpy arrary with [start times, end times]. + """ + ax.plot(data[:, 1], range(data.shape[0])) - ax.set_ylabel('Total plots (plots)') - ax.set_xlim(np.min(data[:,0])-2, np.max(data[:,1])+2) + ax.set_ylabel("Total plots (plots)") + ax.set_xlim(np.min(data[:, 0]) - 2, np.max(data[:, 1]) + 2) -def graph(logdir : str, figfile : str, latest_k : int, window : int) -> None: +def graph(logdir: str, figfile: str, latest_k: int, window: int) -> None: assert window >= 2, "Cannot compute moving average over a window less than 3" assert os.path.isdir(logdir) # Build a list of the logfiles logdir = os.path.abspath(logdir) - logfilenames = [os.path.join(logdir, l) for l in os.listdir(logdir) if - os.path.splitext(l)[-1] == '.log'] + logfilenames = [ + os.path.join(logdir, l) + for l in os.listdir(logdir) + if os.path.splitext(l)[-1] == ".log" + ] assert len(logfilenames) > 0, "Directory contains no files {}".format(logdir) # For each log file, extract the start, end, and duration time_catter = [] - parser = PlotLogParser() + parser = PlotLogParser() for logfilename in logfilenames: - with open(logfilename, 'r') as f: + with open(logfilename, "r") as f: info = parser.parse(f) if info.total_time_raw != 0: time_catter.append( [ - info.started_at.timestamp(), + info.started_at.timestamp(), info.started_at.timestamp() + info.total_time_raw, - info.total_time_raw + info.total_time_raw, ] ) @@ -158,21 +171,21 @@ def graph(logdir : str, figfile : str, latest_k : int, window : int) -> None: # Create figure num_plots = 4 - f, _ = plt.subplots(2,1, figsize=(8, 10)) - ax = plt.subplot(num_plots,1,1) - ax.set_title('Plot performance summary') + f, _ = plt.subplots(2, 1, figsize=(8, 10)) + ax = plt.subplot(num_plots, 1, 1) + ax.set_title("Plot performance summary") create_ax_dumbbell(ax, data_started_ended) if data_started_ended.shape[0] > window: - ax = plt.subplot(num_plots,1,2) + ax = plt.subplot(num_plots, 1, 2) create_ax_plotrate(ax, data_started_ended, end=True, window=window) - ax = plt.subplot(num_plots,1,3) + ax = plt.subplot(num_plots, 1, 3) create_ax_plottime(ax, data_started_ended, window=window) - ax = plt.subplot(num_plots,1,4) + ax = plt.subplot(num_plots, 1, 4) create_ax_plotcumulative(ax, data_started_ended) - ax.set_xlabel('Time (hours)') - f.savefig(figfile) \ No newline at end of file + ax.set_xlabel("Time (hours)") + f.savefig(figfile) diff --git a/src/plotman/interactive.py b/src/plotman/interactive.py index 9bd6c149..5416351f 100644 --- a/src/plotman/interactive.py +++ b/src/plotman/interactive.py @@ -13,6 +13,7 @@ root_logger = logging.getLogger() + class TerminalTooSmallError(Exception): pass diff --git a/src/plotman/job.py b/src/plotman/job.py index 23765a82..198ff674 100644 --- a/src/plotman/job.py +++ b/src/plotman/job.py @@ -365,4 +365,4 @@ def cancel(self) -> None: # complete if the job is supsended, so we also need to resume it. # TODO: check that this is best practice for killing a job. 
self.proc.resume() - self.proc.terminate() \ No newline at end of file + self.proc.terminate() diff --git a/src/plotman/plotman.py b/src/plotman/plotman.py index 6fc3b680..8ad74488 100644 --- a/src/plotman/plotman.py +++ b/src/plotman/plotman.py @@ -158,15 +158,28 @@ def parse_args(self) -> typing.Any: "logfile", type=str, nargs="+", help="logfile(s) to analyze" ) - p_graph = sp.add_parser('graph', help='create graph with plotting statistics') - p_graph.add_argument('figfile', type=str, - help='graph file produced as output (.png, .jpg, etc.)') - p_graph.add_argument('--logdir', type=str, default=None, - help='directory containing multiple logfiles to graph') - p_graph.add_argument('--latest_k', type=int, default=None, - help='if passed, will only graph statistics for the latest k plots') - p_graph.add_argument('--window', type=int, default=3, - help='window size to compute moving average over') + p_graph = sp.add_parser("graph", help="create graph with plotting statistics") + p_graph.add_argument( + "figfile", type=str, help="graph file produced as output (.png, .jpg, etc.)" + ) + p_graph.add_argument( + "--logdir", + type=str, + default=None, + help="directory containing multiple logfiles to graph", + ) + p_graph.add_argument( + "--latest_k", + type=int, + default=None, + help="if passed, will only graph statistics for the latest k plots", + ) + p_graph.add_argument( + "--window", + type=int, + default=3, + help="window size to compute moving average over", + ) args = parser.parse_args() return args @@ -310,7 +323,7 @@ def main() -> None: # # Graphing of completed jobs # - elif args.cmd == 'graph': + elif args.cmd == "graph": # If no logdir was passed, use the dir specified in cfg (this will almost always be the case) if args.logdir is None: args.logdir = cfg.logging.plots From b55fb571c7c0d746f54cb417829094e2017a7a09 Mon Sep 17 00:00:00 2001 From: Kyle Altendorf Date: Sat, 28 Aug 2021 23:31:45 -0400 Subject: [PATCH 21/24] tidy --- CHANGELOG.md | 7 +++---- src/plotman/configuration.py | 8 +------- 2 files changed, 4 insertions(+), 11 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f76e6d27..eca211e2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,10 +18,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ([#898](https://github.com/ericaltendorf/plotman/pull/898)) - Output same entries to plotman.log from 'plotman interactive' and ' plotman plot/archive' "daemons". ([#878](https://github.com/ericaltendorf/plotman/pull/878)) -- `plotman graph` command to create a matplotlib plot for completed - plots ([#612](https://github.com/ericaltendorf/plotman/pull/612)). - Creates a graph image showing plots over time, average plot rage, - average plot time, and total number of plots over time. +- `plotman graph` command to create a matplotlib plot for completed plots. + Creates a graph image showing plots over time, average plot rate, average plot time, and total number of plots over time. 
+ ([#612](https://github.com/ericaltendorf/plotman/pull/612)) ## [0.5.1] - 2021-07-15 ### Fixed diff --git a/src/plotman/configuration.py b/src/plotman/configuration.py index c5ce1482..95ad4a90 100644 --- a/src/plotman/configuration.py +++ b/src/plotman/configuration.py @@ -48,6 +48,7 @@ def get_validated_configs( config_text: str, config_path: str, preset_target_definitions_text: str ) -> "PlotmanConfig": """Return a validated instance of PlotmanConfig with data from plotman.yaml + :raises ConfigurationException: Raised when plotman.yaml is either missing or malformed """ schema = desert.schema(PlotmanConfig) @@ -327,13 +328,6 @@ def create_tdisk_space_log_path(self, time: pendulum.DateTime) -> str: group="disk_space", ) - def create_tdisk_space_log_path(self, time: pendulum.DateTime) -> str: - return self._create_log_path( - time=time, - directory=self.disk_spaces, - group="disk_space", - ) - def _create_log_path( self, time: pendulum.DateTime, directory: str, group: str ) -> str: From 1011b0f6c40a04d04dcd237069d2adb9792c39f0 Mon Sep 17 00:00:00 2001 From: nikwl Date: Sun, 29 Aug 2021 17:52:12 -0400 Subject: [PATCH 22/24] Updated graph.py parser to new style. Reformatted graph.py with black. --- src/plotman/graph.py | 40 ++++++++++++++++++++++++++-------------- 1 file changed, 26 insertions(+), 14 deletions(-) diff --git a/src/plotman/graph.py b/src/plotman/graph.py index 16d4eca9..7aa0cddf 100644 --- a/src/plotman/graph.py +++ b/src/plotman/graph.py @@ -4,7 +4,7 @@ import matplotlib import matplotlib.pyplot as plt -from plotman.log_parser import PlotLogParser +import plotman.plotters def create_ax_dumbbell( @@ -141,20 +141,32 @@ def graph(logdir: str, figfile: str, latest_k: int, window: int) -> None: # For each log file, extract the start, end, and duration time_catter = [] - parser = PlotLogParser() for logfilename in logfilenames: - with open(logfilename, "r") as f: - info = parser.parse(f) - if info.total_time_raw != 0: - time_catter.append( - [ - info.started_at.timestamp(), - info.started_at.timestamp() + info.total_time_raw, - info.total_time_raw, - ] - ) - - assert len(time_catter) > 0, "No valid log files found, need a finished plot" + with open(logfilename) as file: + try: + plotter_type = plotman.plotters.get_plotter_from_log(lines=file) + except plotman.errors.UnableToIdentifyPlotterFromLogError: + continue + + parser = plotter_type() + + with open(logfilename, "rb") as binary_file: + read_bytes = binary_file.read() + + parser.update(chunk=read_bytes) + info = parser.common_info() + + # Extract timing information + if info.total_time_raw != 0: + time_catter.append( + [ + info.started_at.timestamp(), + info.started_at.timestamp() + info.total_time_raw, + info.total_time_raw, + ] + ) + + assert len(time_catter) > 0, "No valid log files found" # This array will hold start and end data (in hours) data_started_ended = np.array(time_catter) / (60 * 60) From 3bc6d9046cccb0e0bad698164fec0daba30e95b7 Mon Sep 17 00:00:00 2001 From: Kyle Altendorf Date: Mon, 30 Aug 2021 08:43:48 -0400 Subject: [PATCH 23/24] Update setup.cfg --- setup.cfg | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.cfg b/setup.cfg index ad57d433..28933724 100644 --- a/setup.cfg +++ b/setup.cfg @@ -73,6 +73,7 @@ checks = mypy == 0.902 types-pkg_resources ~= 0.1.2 %(test)s + %(graph)s graph = matplotlib ~= 3.4 numpy ~= 1.20 From d15ec4c5a1aa1a8fdcf664a14098d07abb742068 Mon Sep 17 00:00:00 2001 From: Kyle Altendorf Date: Mon, 30 Aug 2021 08:54:38 -0400 Subject: [PATCH 24/24] [mypy-matplotlib] 
ignore_missing_imports = true --- mypy.ini | 3 +++ 1 file changed, 3 insertions(+) diff --git a/mypy.ini b/mypy.ini index 2b2c6b0f..d4e50318 100644 --- a/mypy.ini +++ b/mypy.ini @@ -9,6 +9,9 @@ ignore_missing_imports = true [mypy-click] ignore_missing_imports = true +[mypy-matplotlib] +ignore_missing_imports = true + [mypy-pendulum] # TODO: https://github.com/sdispater/pendulum/pull/551 implicit_reexport = true
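With the full series applied, the graphing feature is driven end to end by the arguments registered in plotman.py above. An illustrative session follows; paths and flag values are examples only, and the pip extra assumes the `graph` dependency list in setup.cfg is exposed under `[options.extras_require]`:

    # Optional graphing dependencies (matplotlib ~= 3.4, numpy ~= 1.20):
    pip install --editable '.[graph]'

    # Render the four-panel summary (dumbbell, plot rate, plot time,
    # cumulative plots); --logdir falls back to cfg.logging.plots:
    plotman graph summary.png --window 5

    # Only graph statistics for the 20 most recently finished plots:
    plotman graph summary.png --logdir ~/chia/logs --latest_k 20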