diff --git a/.gitignore b/.gitignore index d8ed0a3d5..927e62091 100644 --- a/.gitignore +++ b/.gitignore @@ -13,3 +13,4 @@ docs/html/* api/* bexhoma/__pycache__/* /cluster-monitoring-default.config +logs_tests/local/* \ No newline at end of file diff --git a/benchbase.py b/benchbase.py index 03e7f620e..5f51725b6 100644 --- a/benchbase.py +++ b/benchbase.py @@ -33,7 +33,7 @@ parser = argparse.ArgumentParser(description=description) parser.add_argument('mode', help='start sut, also load data or also run the TPC-C queries', choices=['run', 'start', 'load']) parser.add_argument('-aws', '--aws', help='fix components to node groups at AWS', action='store_true', default=False) - parser.add_argument('-dbms','--dbms', help='DBMS to load the data', choices=['PostgreSQL', 'MySQL', 'MariaDB', 'YugabyteDB', 'CockroachDB'], default=[], action='append') + parser.add_argument('-dbms','--dbms', help='DBMS to load the data', choices=['PostgreSQL', 'MySQL', 'MariaDB', 'YugabyteDB', 'CockroachDB', 'DatabaseService'], default=[], action='append') parser.add_argument('-db', '--debug', help='dump debug informations', action='store_true') parser.add_argument('-sl', '--skip-loading', help='do not ingest, start benchmarking immediately', action='store_true', default=False) parser.add_argument('-cx', '--context', help='context of Kubernetes (for a multi cluster environment), default is current context', default=None) @@ -344,23 +344,27 @@ if skip_loading: config.loading_deactivated = True config.sut_service_name = "yb-tserver-service" # fix service name of SUT, because it is not managed by bexhoma - config.sut_container_name = "yb-tserver" # fix container name of SUT + config.sut_container_name = '' # fix container name of SUT def get_worker_pods(self): """ Returns a list of all pod names of workers for the current SUT. Default is component name is 'worker' for a bexhoma managed DBMS. + This is used for example to find the pods of the workers in order to get the host infos (CPU, RAM, node name, ...). YugabyteDB: This is yb-tserver-0, -1 etc. :return: list of endpoints """ - pods_worker = self.experiment.cluster.get_pods(component='worker', configuration=self.configuration, experiment=self.code) + pods_worker = ['yb-tserver-0', 'yb-tserver-1', 'yb-tserver-2'] + #pods_worker = self.experiment.cluster.get_pods(app='', component='', configuration='yb-tserver', experiment='') + #print("****************", pods_worker) return pods_worker - #config.get_worker_pods = types.MethodType(get_worker_pods, config) + config.get_worker_pods = types.MethodType(get_worker_pods, config) def create_monitoring(self, app='', component='monitoring', experiment='', configuration=''): """ Generate a name for the monitoring component. + This is used in a pattern for promql. Basically this is `{app}-{component}-{configuration}-{experiment}-{client}`. - For YugabyteDB, the service to be monitored is named like 'yb-tserver-'. + For YugabyteDB, the service of the SUT to be monitored is named like 'yb-tserver-'. :param app: app the component belongs to :param component: Component, for example sut or monitoring @@ -379,21 +383,29 @@ def get_worker_endpoints(self): Returns all endpoints of a headless service that monitors nodes of a distributed DBMS. These are IPs of cAdvisor instances. The endpoint list is to be filled in a config of an instance of Prometheus. - For YugabyteDB the service is fixed to be 'bexhoma-service-monitoring-default' and does not depend on the experiment. 
+        By default, the workers can be found by the name of their component (worker-0 etc).
+        This is necessary when we have sidecar containers attached to the workers of a distributed DBMS.

        :return: list of endpoints
        """
-        endpoints = self.experiment.cluster.get_service_endpoints(service_name="bexhoma-service-monitoring-default")
+        endpoints = []
+        #name_worker = self.generate_component_name(component='worker', configuration=self.configuration, experiment=self.code)
+        pods_worker = self.get_worker_pods()
+        for pod in pods_worker:
+            #endpoint = '{worker}.{service_sut}'.format(worker=pod, service_sut=name_worker)
+            endpoint = '{worker}'.format(worker=pod)
+            endpoints.append(endpoint)
+            print('Worker Endpoint: {endpoint}'.format(endpoint = endpoint))
        self.logger.debug("yugabytedb.get_worker_endpoints({})".format(endpoints))
        return endpoints
-    #config.get_worker_endpoints = types.MethodType(get_worker_endpoints, config)
+    config.get_worker_endpoints = types.MethodType(get_worker_endpoints, config)
    def set_metric_of_config(self, metric, host, gpuid):
        """
        Returns a promql query.
        Parameters in this query are substituted, so that prometheus finds the correct metric.
        Example: In 'sum(irate(container_cpu_usage_seconds_total{{container_label_io_kubernetes_pod_name=~"(.*){configuration}-{experiment}(.*)", container_label_io_kubernetes_pod_name=~"(.*){configuration}-{experiment}(.*)", container_label_io_kubernetes_container_name="dbms"}}[1m]))'
        configuration and experiment are placeholders and will be replaced by concrete values.
-        Here: We do not have a SUT that is specific to the experiment or configuration.
+        YugabyteDB: We do not have a SUT that is specific to the experiment or configuration.
+        The pod names follow a pattern like yb-tserver and there is no container name.
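+        For YugabyteDB, the substituted query therefore looks roughly like (duplicate label filters omitted):
+        'sum(irate(container_cpu_usage_seconds_total{container_label_io_kubernetes_pod_name=~"(.*)yb-tserver-(.*)"}[1m]))'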
:param metric: Parametrized promql query :param host: Name of the host the metrics should be collected from @@ -401,6 +413,7 @@ def set_metric_of_config(self, metric, host, gpuid): :return: promql query without parameters """ metric = metric.replace(', container="dbms"', '') + metric = metric.replace(', container_label_io_kubernetes_container_name="dbms"', '') return metric.format(host=host, gpuid=gpuid, configuration='yb-tserver', experiment='') config.set_metric_of_config = types.MethodType(set_metric_of_config, config) config.set_loading_parameters( @@ -452,8 +465,6 @@ def set_metric_of_config(self, metric, host, gpuid): ) #print(executor_list) config.add_benchmark_list(executor_list) - #print(executor_list) - config.add_benchmark_list(executor_list) cluster.max_sut = 1 # can only run 1 in same cluster because of fixed service if ("CockroachDB" in args.dbms):# or len(args.dbms) == 0): # not included per default # CockroachDB @@ -508,9 +519,60 @@ def set_metric_of_config(self, metric, host, gpuid): ) #print(executor_list) config.add_benchmark_list(executor_list) + #cluster.max_sut = 1 # can only run 1 in same cluster because of fixed service + if ("DatabaseService" in args.dbms):# or len(args.dbms) == 0): # not included per default + # DatabaseService + name_format = 'DatabaseService-{threads}-{pods}-{target}' + config = configurations.benchbase(experiment=experiment, docker='DatabaseService', configuration=name_format.format(threads=loading_threads, pods=loading_pods, target=loading_target), alias='DatabaseService') + config.monitoring_sut = False # cannot be monitored since outside of K8s + if skip_loading: + config.loading_deactivated = True + config.set_loading_parameters( + PARALLEL = str(loading_pods), # =1 + SF = SF, + BENCHBASE_BENCH = type_of_benchmark,#'tpcc', + BENCHBASE_PROFILE = 'postgres', + BEXHOMA_DATABASE = 'postgres', + BEXHOMA_HOST = 'bexhoma-service.perdelt.svc.cluster.local', + #BENCHBASE_TARGET = int(target), + BENCHBASE_TERMINALS = loading_threads_per_pod, + BENCHBASE_TIME = SD, + BENCHBASE_ISOLATION = "TRANSACTION_READ_COMMITTED", + ) + config.set_loading(parallel=loading_pods, num_pods=loading_pods) + executor_list = [] + for factor_benchmarking in num_benchmarking_target_factors:#range(1, 9):#range(1, 2):#range(1, 15): + benchmarking_target = target_base*factor_benchmarking#4*4096*t + for benchmarking_threads in num_benchmarking_threads: + for benchmarking_pods in num_benchmarking_pods:#[1,2]:#[1,8]:#range(2,5): + for num_executor in list_clients: + benchmarking_pods_scaled = num_executor*benchmarking_pods + benchmarking_threads_per_pod = int(benchmarking_threads/benchmarking_pods) + benchmarking_target_per_pod = int(benchmarking_target/benchmarking_pods) + """ + print("benchmarking_target", benchmarking_target) + print("benchmarking_pods", benchmarking_pods) + print("benchmarking_pods_scaled", benchmarking_pods_scaled) + print("benchmarking_threads", benchmarking_threads) + print("benchmarking_threads_per_pod", benchmarking_threads_per_pod) + print("benchmarking_target_per_pod", benchmarking_target_per_pod) + """ + executor_list.append(benchmarking_pods_scaled) + config.add_benchmarking_parameters( + PARALLEL = str(benchmarking_pods_scaled), + SF = SF, + BENCHBASE_BENCH = type_of_benchmark,#'tpcc', + BENCHBASE_PROFILE = 'postgres', + BEXHOMA_DATABASE = 'postgres', + BEXHOMA_HOST = 'bexhoma-service.perdelt.svc.cluster.local', + BENCHBASE_TARGET = benchmarking_target_per_pod, + BENCHBASE_TERMINALS = benchmarking_threads_per_pod, + BENCHBASE_TIME = SD, + 
BENCHBASE_ISOLATION = "TRANSACTION_READ_COMMITTED", + ) #print(executor_list) config.add_benchmark_list(executor_list) - cluster.max_sut = 1 # can only run 1 in same cluster because of fixed service + #cluster.max_sut = 1 # can only run 1 in same cluster because of fixed service ############## ### wait for necessary nodegroups to have planned size ############## diff --git a/bexhoma/clusters.py b/bexhoma/clusters.py index 0d1d3589b..079dd8ee9 100644 --- a/bexhoma/clusters.py +++ b/bexhoma/clusters.py @@ -37,6 +37,7 @@ import urllib.request import urllib.parse from pprint import pprint +from datetime import datetime, timedelta from dbmsbenchmarker import * @@ -865,9 +866,9 @@ def execute_command_in_pod(self, command, pod='', container='', params=''): #pod = self.activepod command_clean = command.replace('"','\\"') if len(container) > 0: - fullcommand = 'kubectl --context {context} exec {pod} --container={container} -- bash -c "{command}"'.format(context=self.context, pod=pod, container=container, command=command_clean) + fullcommand = 'kubectl --context {context} exec {pod} --container={container} -- sh -c "{command}"'.format(context=self.context, pod=pod, container=container, command=command_clean) else: - fullcommand = 'kubectl --context {context} exec {pod} -- bash -c "{command}"'.format(context=self.context, pod=pod, command=command_clean) + fullcommand = 'kubectl --context {context} exec {pod} -- sh -c "{command}"'.format(context=self.context, pod=pod, command=command_clean) #fullcommand = 'kubectl exec '+self.activepod+' --container=dbms -- bash -c "'+command_clean+'"' #print(fullcommand) self.logger.debug('testbed.execute_command_in_pod({})'.format(fullcommand)) @@ -1335,22 +1336,28 @@ def test_if_monitoring_healthy(self): config_K8s = self.config['credentials']['k8s'] if 'service_monitoring' in config_K8s['monitor']: url = config_K8s['monitor']['service_monitoring'].format(namespace=self.contextdata['namespace'], service="monitoring") - query = "node_memory_MemTotal_bytes" + query = "sum(node_memory_MemTotal_bytes)" safe_query = urllib.parse.quote_plus(query) try: - self.logger.debug('Test URL {}'.format(url+"query_range?query="+safe_query+"&start=1&end=2&step=1")) #code= urllib.request.urlopen(url+"query_range?query="+safe_query+"&start=1&end=2&step=1").getcode() # curl -ILs www.welt.de | head -n 1|cut -d$' ' -f2 pod_dashboard = self.get_dashboard_pod_name() self.logger.debug('Inside pod {}'.format(pod_dashboard)) + now = datetime.utcnow() + start = now - timedelta(seconds=300) # 5 minutes ago + end = now - timedelta(seconds=240) # 4 minutes ago cmd = {} - command = "curl -is '{}' | head -n 1|cut -d$' ' -f2".format(url+"query_range?query="+safe_query+"&start=1&end=2&step=1") + query_url = "{url}query_range?query={safe_query}&start={start}&end={end}&step=60".format(url=url, safe_query=safe_query, start=int(start.timestamp()), end=int(end.timestamp())) + self.logger.debug('Test URL {}'.format(query_url)) + command = "curl -L --max-time 10 -is '{}' | head -n 1|cut -d$' ' -f2".format(query_url) + #command = "curl -is '{}' | head -n 1|cut -d$' ' -f2".format(url+"query_range?query="+safe_query+"&start=1&end=2&step=1") self.logger.debug('Command {}'.format(command)) #fullcommand = 'kubectl exec '+self.pod_sut+' --container=dbms -- bash -c "'+command+'"' #cores = os.popen(fullcommand).read() stdin, stdout, stderr = self.execute_command_in_pod(pod=pod_dashboard, command=command, container="dashboard") #print("Return", stdout, stderr) status = stdout#os.popen(fullcommand).read() + 
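+                # at this point 'status' holds the HTTP status code (e.g. '200') that
+                # curl extracted from the first line of the response headers above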
self.logger.debug('Status {}'.format(status))
                if len(status)>0:
                    #return int(status)
                    #print(int(status))
diff --git a/bexhoma/configurations.py b/bexhoma/configurations.py
index 442b94f2a..675ae1250 100644
--- a/bexhoma/configurations.py
+++ b/bexhoma/configurations.py
@@ -142,6 +142,7 @@ def __init__(self, experiment, docker=None, configuration='', script=None, alias
        self.loading_active = experiment.loading_active
        self.loading_deactivated = False # Do not load at all and do not test for loading
        self.monitor_loading = True #: Fetch metrics for the loading phase, if monítoring is active - this is set to False when loading is skipped due to PV
+        self.monitoring_sut = True #: Fetch metrics of SUT, if monitoring is active - this is set to False when a service outside of K8s is benchmarked
        self.jobtemplate_maintaining = ""
        self.jobtemplate_loading = ""
        #self.parallelism = 1
@@ -164,7 +165,7 @@ def __init__(self, experiment, docker=None, configuration='', script=None, alias
        self.loading_timespans = {} # Dict of lists per container of (start,end) pairs containing time markers of loading pods
        self.benchmarking_timespans = {} # Dict of lists per container of (start,end) pairs containing time markers of benchmarking pods
        self.sut_service_name = "" # Name of the DBMS service name, if it is fixed and not installed per configuration
-        self.sut_container_name = "dbms" # Name of the container in the SUT pod, that should be monitored
+        self.sut_container_name = "dbms" # Name of the container in the SUT pod that should be monitored and that is used for reading host info
        self.reset_sut()
        self.benchmark = None # Optional subobject for benchmarking (dbmsbenchmarker instance)
    def reset_sut(self):
@@ -746,6 +747,7 @@ def start_maintaining(self, app='', component='maintaining', experiment='', conf
    def create_monitoring(self, app='', component='monitoring', experiment='', configuration=''):
        """
        Generate a name for the monitoring component.
+        This is used in a pattern for promql.
Basically this is `{app}-{component}-{configuration}-{experiment}-{client}` :param app: app the component belongs to @@ -829,6 +831,7 @@ def start_monitoring(self, app='', component='monitoring', experiment='', config endpoints_cluster = self.experiment.cluster.get_service_endpoints(service_name="bexhoma-service-monitoring-default") i = 0 for endpoint in endpoints_cluster: + print("{:30s}: found monitoring endpoint (cAdvisor) for monitoring {} (added to Prometheus) of daemonset".format(configuration, endpoint)) #print('Worker: {worker}.{service_sut}'.format(worker=pod, service_sut=name_worker)) prometheus_config += """ - job_name: '{endpoint}' @@ -838,23 +841,26 @@ def start_monitoring(self, app='', component='monitoring', experiment='', config - targets: ['{endpoint}:9300']""".format(endpoint=endpoint, client=i, prometheus_interval=self.prometheus_interval, prometheus_timeout=self.prometheus_timeout) i = i + 1 # services of workers - endpoints_worker = self.get_worker_endpoints() - #name_worker = self.generate_component_name(component='worker', configuration=self.configuration, experiment=self.code) - #pods_worker = self.experiment.cluster.get_pods(component='worker', configuration=self.configuration, experiment=self.code) - i = 0 - #for pod in pods_worker: - for endpoint in endpoints_worker: - if endpoint in endpoints_cluster: - # we already monitor this endpoint - continue - #print('Worker: {worker}.{service_sut}'.format(worker=pod, service_sut=name_worker)) - prometheus_config += """ + if len(endpoints_cluster) == 0: + endpoints_worker = self.get_worker_endpoints() + #name_worker = self.generate_component_name(component='worker', configuration=self.configuration, experiment=self.code) + #pods_worker = self.experiment.cluster.get_pods(component='worker', configuration=self.configuration, experiment=self.code) + i = 0 + #for pod in pods_worker: + for endpoint in endpoints_worker: + if endpoint in endpoints_cluster: + # we already monitor this endpoint + print("{:30s}: found worker endpoint (cAdvisor) for monitoring {} (already monitored by cluster)".format(configuration, endpoint)) + continue + print("{:30s}: found worker endpoint (cAdvisor) for monitoring {} (added to Prometheus) of sidecar container".format(configuration, endpoint)) + #print('Worker: {worker}.{service_sut}'.format(worker=pod, service_sut=name_worker)) + prometheus_config += """ - job_name: '{endpoint}' scrape_interval: {prometheus_interval} scrape_timeout: {prometheus_timeout} static_configs: - targets: ['{endpoint}:9300']""".format(endpoint=endpoint, client=i, prometheus_interval=self.prometheus_interval, prometheus_timeout=self.prometheus_timeout) - i = i + 1 + i = i + 1 for i,e in enumerate(envs): if e['name'] == 'BEXHOMA_SERVICE': dep['spec']['template']['spec']['containers'][0]['env'][i]['value'] = name_sut @@ -1639,10 +1645,11 @@ def get_host_node(self): try: datastore = json.loads(result) #print(datastore) - if self.appname == datastore['metadata']['labels']['app']: - if self.pod_sut == datastore['metadata']['name']: - node = datastore['spec']['nodeName'] - return node + # why check app? 
if not managed by bexhoma, this will be completely different + #if self.appname == datastore['metadata']['labels']['app']: + if self.pod_sut == datastore['metadata']['name']: + node = datastore['spec']['nodeName'] + return node except Exception as e: return "" return "" @@ -1899,6 +1906,7 @@ def get_connection_config(self, connection, alias='', dialect='', serverip='loca pods = self.get_worker_pods()#self.experiment.cluster.get_pods(component='worker', configuration=self.configuration, experiment=self.code) for pod in pods: self.pod_sut = pod + print("{:30s}: distributed system - get host info for worker {}".format(self.configuration, pod)) c['worker'].append(self.get_host_all()) self.pod_sut = pod_sut # take latest resources @@ -2204,30 +2212,47 @@ def run_benchmarker_pod(self, # get monitoring for loading if self.monitoring_active and self.monitor_loading: cmd = {} - print("{:30s}: collecting loading metrics of SUT".format(connection)) - #cmd['fetch_loading_metrics'] = 'python metrics.py -r /results/ -c {} -cf {} -f {} -e {} -ts {} -te {}'.format(connection, c['name']+'.config', '/results/'+self.code, self.code, self.timeLoadingStart, self.timeLoadingEnd) - cmd['fetch_loading_metrics'] = 'python metrics.py -r /results/ -db -ct loading -cn {} -c {} -cf {} -f {} -e {} -ts {} -te {}'.format( - self.sut_container_name, - connection, - c['name']+'.config', - '/results/'+self.code, - self.code, - self.timeLoadingStart, - self.timeLoadingEnd) - stdin, stdout, stderr = self.experiment.cluster.execute_command_in_pod(command=cmd['fetch_loading_metrics'], pod=pod_dashboard, container="dashboard") - self.logger.debug(stdout) - self.logger.debug(stderr) - # upload connections infos again, metrics has overwritten it - filename = 'connections.config' - cmd['upload_connection_file'] = 'cp {from_file} {to} -c dashboard'.format(to=pod_dashboard+':/results/'+str(self.code)+'/'+filename, from_file=self.path+"/"+filename) - stdout = self.experiment.cluster.kubectl(cmd['upload_connection_file']) - self.logger.debug(stdout) + #self.monitoring_sut = True + if self.monitoring_sut: + print("{:30s}: collecting loading metrics of SUT".format(connection)) + #cmd['fetch_loading_metrics'] = 'python metrics.py -r /results/ -c {} -cf {} -f {} -e {} -ts {} -te {}'.format(connection, c['name']+'.config', '/results/'+self.code, self.code, self.timeLoadingStart, self.timeLoadingEnd) + # with container name? 
should better be part of the metric query + #cmd['fetch_loading_metrics'] = 'python metrics.py -r /results/ -db -ct loading -cn {} -c {} -cf {} -f {} -e {} -ts {} -te {}'.format( + #metric_example = self.benchmark.dbms[self.configuration].connectiondata['monitoring']['metrics_special']['total_cpu_memory'] + #metric_example = c['name']['monitoring']['metrics_special']['total_cpu_memory'] + #print("{:30s}: example mtric {}".format(connection, metric_example)) + metric_example = self.benchmark.dbms[self.current_benchmark_connection].connectiondata['monitoring']['metrics_special']['total_cpu_memory'] + print("{:30s}: example metric {}".format(connection, metric_example)) + cmd['fetch_loading_metrics'] = 'python metrics.py -r /results/ -db -ct loading -c {} -cf {} -f {} -e {} -ts {} -te {}'.format( + #self.sut_container_name, + connection, + c['name']+'.config', + '/results/'+self.code, + self.code, + self.timeLoadingStart, + self.timeLoadingEnd) + stdin, stdout, stderr = self.experiment.cluster.execute_command_in_pod(command=cmd['fetch_loading_metrics'], pod=pod_dashboard, container="dashboard") + self.logger.debug(stdout) + self.logger.debug(stderr) + # upload connections infos again, metrics has overwritten it + filename = 'connections.config' + cmd['upload_connection_file'] = 'cp {from_file} {to} -c dashboard'.format(to=pod_dashboard+':/results/'+str(self.code)+'/'+filename, from_file=self.path+"/"+filename) + stdout = self.experiment.cluster.kubectl(cmd['upload_connection_file']) + self.logger.debug(stdout) # get metrics of loader components # only if general monitoring is on endpoints_cluster = self.experiment.cluster.get_service_endpoints(service_name="bexhoma-service-monitoring-default") if len(endpoints_cluster)>0 or self.experiment.cluster.monitor_cluster_exists: # data generator container print("{:30s}: collecting metrics of data generator".format(connection)) + metric_example = self.benchmark.dbms[self.current_benchmark_connection].connectiondata['monitoring']['metrics']['total_cpu_memory'].copy() + container = "datagenerator" + if container is not None: + metric_example['query'] = metric_example['query'].replace('container_label_io_kubernetes_container_name="dbms"', 'container_label_io_kubernetes_container_name="{}"'.format(container)) + metric_example['query'] = metric_example['query'].replace('container_label_io_kubernetes_container_name!="dbms"', 'container_label_io_kubernetes_container_name!="{}"'.format(container)) + metric_example['query'] = metric_example['query'].replace('container="dbms"', 'container="{}"'.format(container)) + metric_example['query'] = metric_example['query'].replace('container!="dbms"', 'container!="{}"'.format(container)) + print("{:30s}: example metric {}".format(connection, metric_example)) cmd['fetch_loader_metrics'] = 'python metrics.py -r /results/ -db -ct datagenerator -cn datagenerator -c {} -cf {} -f {} -e {} -ts {} -te {}'.format( connection, c['name']+'.config', @@ -2245,6 +2270,14 @@ def run_benchmarker_pod(self, self.logger.debug(stdout) # data injector container "sensor" print("{:30s}: collecting metrics of data injector".format(connection)) + metric_example = self.benchmark.dbms[self.current_benchmark_connection].connectiondata['monitoring']['metrics']['total_cpu_memory'].copy() + container = "sensor" + if container is not None: + metric_example['query'] = metric_example['query'].replace('container_label_io_kubernetes_container_name="dbms"', 'container_label_io_kubernetes_container_name="{}"'.format(container)) + metric_example['query'] = 
metric_example['query'].replace('container_label_io_kubernetes_container_name!="dbms"', 'container_label_io_kubernetes_container_name!="{}"'.format(container))
+                metric_example['query'] = metric_example['query'].replace('container="dbms"', 'container="{}"'.format(container))
+                metric_example['query'] = metric_example['query'].replace('container!="dbms"', 'container!="{}"'.format(container))
+            print("{:30s}: example metric {}".format(connection, metric_example))
            cmd['fetch_loader_metrics'] = 'python metrics.py -r /results/ -db -ct loader -cn sensor -c {} -cf {} -f {} -e {} -ts {} -te {}'.format(
                connection,
                c['name']+'.config',
@@ -2260,7 +2293,7 @@ def run_benchmarker_pod(self,
            cmd['upload_connection_file'] = 'cp {from_file} {to} -c dashboard'.format(to=pod_dashboard+':/results/'+str(self.code)+'/'+filename, from_file=self.path+"/"+filename)
            stdout = self.experiment.cluster.kubectl(cmd['upload_connection_file'])
            self.logger.debug(stdout)
-    def execute_command_in_pod_sut(self, command, pod='', container='dbms', params=''):
+    def execute_command_in_pod_sut(self, command, pod='', container='', params=''):
        """
        Runs an shell command remotely inside a container of a pod.
        This defaults to the current sut's pod and the container "dbms"
@@ -2275,7 +2308,12 @@ def execute_command_in_pod_sut(self, command, pod='', container='dbms', params='
            pod=self.pod_sut
        #pod = self.activepod
        if len(container) == 0:
-            container='dbms'
+            container = self.sut_container_name
+            #if self.sut_container_name is not None:
+            #    container = self.sut_container_name
+            #else:
+            #    container = ''
+            #    #container = 'dbms'
        if self.pod_sut == '':
            self.check_sut()
        return self.experiment.cluster.execute_command_in_pod(command=command, pod=pod, container=container, params=params)
@@ -3005,10 +3043,12 @@ def get_worker_pods(self):
        """
        Returns a list of all pod names of workers for the current SUT.
        Default is component name is 'worker' for a bexhoma managed DBMS.
+        This is used for example to find the pods of the workers in order to get the host info (CPU, RAM, node name, ...).

        :return: list of endpoints
        """
        pods_worker = self.experiment.cluster.get_pods(component='worker', configuration=self.configuration, experiment=self.code)
+        print("Worker pods found: ", pods_worker)
        return pods_worker
    def get_worker_endpoints(self):
        """
        Returns all endpoints of a headless service that monitors nodes of a distributed DBMS.
        These are IPs of cAdvisor instances.
        The endpoint list is to be filled in a config of an instance of Prometheus.
        By default, the workers can be found by the name of their component (worker-0 etc).
+        This is necessary when we have sidecar containers attached to the workers of a distributed DBMS.
:return: list of endpoints """ @@ -3025,7 +3066,7 @@ def get_worker_endpoints(self): for pod in pods_worker: endpoint = '{worker}.{service_sut}'.format(worker=pod, service_sut=name_worker) endpoints.append(endpoint) - print('Worker: {endpoint}'.format(endpoint = endpoint)) + print('Worker endpoint: {endpoint}'.format(endpoint = endpoint)) return endpoints diff --git a/bexhoma/evaluators.py b/bexhoma/evaluators.py index 3c0c0368c..2700bfcc5 100644 --- a/bexhoma/evaluators.py +++ b/bexhoma/evaluators.py @@ -527,14 +527,17 @@ def test_results(self): if self.include_benchmarking: df = self.get_df_benchmarking() if not df.empty: - print("Benchmarking", df) + #print("Benchmarking", df) + pass self.workflow = self.reconstruct_workflow(df) if not len(self.workflow) == 0: - print("Workflow", self.workflow) + #print("Workflow", self.workflow) + pass if self.include_loading: df = self.get_df_loading() if not df.empty: - print("Loading", df) + #print("Loading", df) + pass return 0 except Exception as e: print(e) @@ -1263,11 +1266,11 @@ def test_results(self): filename = os.fsdecode(file) if filename.endswith(".pickle"): df = pd.read_pickle(self.path+"/"+filename) - print(df) - print(df.index.name) + #print(df) + #print(df.index.name) list_vusers = list(df['vusers']) - print(list_vusers) - print("vusers", " ".join(list_vusers)) + #print(list_vusers) + #print("vusers", " ".join(list_vusers)) return super().test_results() except Exception as e: print(e) diff --git a/bexhoma/experiments.py b/bexhoma/experiments.py index efd59c61f..3f4f4fdec 100644 --- a/bexhoma/experiments.py +++ b/bexhoma/experiments.py @@ -1543,22 +1543,35 @@ def end_benchmarking(self, self.cluster.logger.debug(stdout) # get monitoring for benchmarking if self.monitoring_active: - print("{:30s}: collecting execution metrics of SUT".format(connection)) - cmd['fetch_benchmarking_metrics'] = 'python metrics.py -r /results/ -db -ct stream -c {} -cf {} -f {} -e {} -ts {} -te {}'.format(connection, connection+'.config', '/results/'+self.code, self.code, start_time, end_time) - #cmd['fetch_loading_metrics'] = 'python metrics.py -r /results/ -db -ct loading -c {} -cf {} -f {} -e {} -ts {} -te {}'.format(connection, c['name']+'.config', '/results/'+self.code, self.code, self.timeLoadingStart, self.timeLoadingEnd) - stdin, stdout, stderr = self.cluster.execute_command_in_pod(command=cmd['fetch_benchmarking_metrics'], pod=pod_dashboard, container="dashboard") - self.cluster.logger.debug(stdout) - self.cluster.logger.debug(stderr) - # upload connections infos again, metrics has overwritten it - filename = 'connections.config' - cmd['upload_connection_file'] = 'cp {from_file} {to} -c dashboard'.format(to=pod_dashboard+':/results/'+str(self.code)+'/'+filename, from_file=self.path+"/"+filename) - stdout = self.cluster.kubectl(cmd['upload_connection_file']) - self.cluster.logger.debug(stdout) + if config.monitoring_sut: + print("{:30s}: collecting execution metrics of SUT".format(connection)) + #print(config.current_benchmark_connection) + #print(config.benchmark.dbms.keys()) + metric_example = config.benchmark.dbms[config.current_benchmark_connection].connectiondata['monitoring']['metrics_special']['total_cpu_memory'].copy() + print("{:30s}: example metric {}".format(connection, metric_example)) + cmd['fetch_benchmarking_metrics'] = 'python metrics.py -r /results/ -db -ct stream -c {} -cf {} -f {} -e {} -ts {} -te {}'.format(connection, connection+'.config', '/results/'+self.code, self.code, start_time, end_time) + #cmd['fetch_loading_metrics'] = 
'python metrics.py -r /results/ -db -ct loading -c {} -cf {} -f {} -e {} -ts {} -te {}'.format(connection, c['name']+'.config', '/results/'+self.code, self.code, self.timeLoadingStart, self.timeLoadingEnd) + stdin, stdout, stderr = self.cluster.execute_command_in_pod(command=cmd['fetch_benchmarking_metrics'], pod=pod_dashboard, container="dashboard") + self.cluster.logger.debug(stdout) + self.cluster.logger.debug(stderr) + # upload connections infos again, metrics has overwritten it + filename = 'connections.config' + cmd['upload_connection_file'] = 'cp {from_file} {to} -c dashboard'.format(to=pod_dashboard+':/results/'+str(self.code)+'/'+filename, from_file=self.path+"/"+filename) + stdout = self.cluster.kubectl(cmd['upload_connection_file']) + self.cluster.logger.debug(stdout) # get metrics of benchmarker components # only if general monitoring is on endpoints_cluster = self.cluster.get_service_endpoints(service_name="bexhoma-service-monitoring-default") if len(endpoints_cluster)>0 or self.cluster.monitor_cluster_exists: print("{:30s}: collecting metrics of benchmarker".format(connection)) + metric_example = config.benchmark.dbms[config.current_benchmark_connection].connectiondata['monitoring']['metrics']['total_cpu_memory'].copy() + container = "dbmsbenchmarker" + if container is not None: + metric_example['query'] = metric_example['query'].replace('container_label_io_kubernetes_container_name="dbms"', 'container_label_io_kubernetes_container_name="{}"'.format(container)) + metric_example['query'] = metric_example['query'].replace('container_label_io_kubernetes_container_name!="dbms"', 'container_label_io_kubernetes_container_name!="{}"'.format(container)) + metric_example['query'] = metric_example['query'].replace('container="dbms"', 'container="{}"'.format(container)) + metric_example['query'] = metric_example['query'].replace('container!="dbms"', 'container!="{}"'.format(container)) + print("{:30s}: example metric {}".format(connection, metric_example)) cmd['fetch_benchmarker_metrics'] = 'python metrics.py -r /results/ -db -ct benchmarker -cn dbmsbenchmarker -c {} -cf {} -f {} -e {} -ts {} -te {}'.format(connection, connection+'.config', '/results/'+self.code, self.code, start_time, end_time) #cmd['fetch_loading_metrics'] = 'python metrics.py -r /results/ -db -ct loading -c {} -cf {} -f {} -e {} -ts {} -te {}'.format(connection, c['name']+'.config', '/results/'+self.code, self.code, self.timeLoadingStart, self.timeLoadingEnd) stdin, stdout, stderr = self.cluster.execute_command_in_pod(command=cmd['fetch_benchmarker_metrics'], pod=pod_dashboard, container="dashboard") diff --git a/dev/doc_benchbase_testcase_monitoring_summary.md b/dev/doc_benchbase_testcase_monitoring_summary.md new file mode 100644 index 000000000..0199ee5e6 --- /dev/null +++ b/dev/doc_benchbase_testcase_monitoring_summary.md @@ -0,0 +1,100 @@ +## Show Summary + +### Workload +Benchbase Workload SF=16 (warehouses for TPC-C) +* Type: benchbase +* Duration: 1030s +* Code: 1728326500 +* This includes no queries. Benchbase runs the benchmark +* This experiment compares run time and resource consumption of Benchbase queries in different DBMS. + Benchbase data is generated and loaded using several threads. + Benchmark is 'tpcc'. Scaling factor (e.g., number of warehouses) is 16. Benchmarking runs for 5 minutes. Target is based on multiples of '1024'. Factors for benchmarking are [16]. + System metrics are monitored by a cluster-wide installation. + Benchmark is limited to DBMS ['PostgreSQL']. 
+ Import is handled by 1 processes (pods). + Loading is fixed to cl-worker19. + Benchmarking is fixed to cl-worker19. + SUT is fixed to cl-worker11. + Loading is tested with [1] threads, split into [1] pods. + Benchmarking is tested with [16] threads, split into [1, 2] pods. + Benchmarking is run as [1] times the number of benchmarking pods. + Experiment is run once. + +### Connections +PostgreSQL-1-1-1024-1 uses docker image postgres:16.1 +* RAM:541008605184 +* CPU:AMD Opteron(tm) Processor 6378 +* Cores:64 +* host:5.15.0-116-generic +* node:cl-worker11 +* disk:253380184 +* datadisk:4409116 +* requests_cpu:4 +* requests_memory:16Gi + +PostgreSQL-1-1-1024-2 uses docker image postgres:16.1 +* RAM:541008605184 +* CPU:AMD Opteron(tm) Processor 6378 +* Cores:64 +* host:5.15.0-116-generic +* node:cl-worker11 +* disk:256636664 +* datadisk:7665440 +* requests_cpu:4 +* requests_memory:16Gi + +### Execution + +| x | experiment_run | terminals | target | pod_count | time | Throughput (requests/second) | Latency Distribution.95th Percentile Latency (microseconds) | Latency Distribution.Average Latency (microseconds) | +|-----------------------|---------------:|:----------|--------|-----------|-------|------------------------------|-------------------------------------------------------------|-----------------------------------------------------| +| PostgreSQL-1-1-1024-1 | 1 | 16 | 16384 | 1 | 300.0 | 2619.73 | 13335.0 | 6102.0 | +| PostgreSQL-1-1-1024-2 | 1 | 16 | 16384 | 2 | 300.0 | 2397.49 | 15397.0 | 6666.5 | + +Warehouses: 16 + +### Workflow + +#### Actual +* DBMS PostgreSQL-1-1-1024 - Pods [[2, 1]] + +#### Planned +* DBMS PostgreSQL-1-1-1024 - Pods [[1, 2]] + +### Loading + time_load terminals pods Imported warehouses [1/h] +PostgreSQL-1-1-1024-1 132.0 1.0 1.0 436.363636 +PostgreSQL-1-1-1024-2 132.0 1.0 2.0 436.363636 + +### Ingestion - SUT + CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] +PostgreSQL-1-1-1024-1 900.72 9.45 4.01 5.63 +PostgreSQL-1-1-1024-2 900.72 9.45 4.01 5.63 + +### Ingestion - Loader + CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] +PostgreSQL-1-1-1024-1 981.97 10.75 1.33 1.33 +PostgreSQL-1-1-1024-2 981.97 10.75 1.33 1.33 + +### Execution - SUT + CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] +PostgreSQL-1-1-1024-1 2125.39 7.73 4.83 7.10 +PostgreSQL-1-1-1024-2 2174.26 7.16 5.43 8.25 + +### Execution - Benchmarker + CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] +PostgreSQL-1-1-1024-1 1323.62 4.88 1.44 1.44 +PostgreSQL-1-1-1024-2 1323.62 2.42 4.17 4.17 + +### Tests +* TEST passed: Throughput (requests/second) contains no 0 or NaN +* TEST passed: Ingestion SUT contains no 0 or NaN in CPU [CPUs] +* TEST passed: Ingestion Loader contains no 0 or NaN in CPU [CPUs] +* TEST passed: Execution SUT contains no 0 or NaN in CPU [CPUs] +* TEST passed: Execution Benchmarker contains no 0 or NaN in CPU [CPUs] +* TEST passed: Workflow as planned + +| animal_1 | animal_2 | +|-----------:|-----------:| +| 1 | 3 | +| 2 | 4 | + diff --git a/docs/Example-CloudDatabase.md b/docs/Example-CloudDatabase.md new file mode 100644 index 000000000..d8be8d0bd --- /dev/null +++ b/docs/Example-CloudDatabase.md @@ -0,0 +1,1352 @@ +# Example: Benchmark a Cloud Database + + + + +The following example treats **a cloud database that is compatible to PostgreSQL**. + +This differs from the default behaviour of bexhoma, since we benchmark **a distributed DBMS, that is not managed by bexhoma and does not exist in the Kubernetes cluster in the same namespace**. 
+
+Important implications of this:
+* Bexhoma neither starts nor stops the DBMS.
+* There can only be one DBMS in the cluster at the same time.
+* Bexhoma does not know what resides inside the database.
+* Bexhoma can only monitor the components of the experiment other than the SUT.
+
+In order to be fully functional, bexhoma installs an instance of PostgreSQL that does nothing (a container with psql would be enough).
+Bexhoma writes info about the status of the experiment to this "SUT" pod to mimic having access to the DBMS.
+Moreover, the container is used to install a schema to the database via psql.
+
+All metrics in monitoring are summed across all matching components.
+
+**The results are not official benchmark results.
+Exact performance depends on a number of parameters.
+You may get different results.
+These examples are solely to illustrate how to use bexhoma and show the result evaluation.**
+
+References:
+1. Benchmarking cloud serving systems with YCSB: https://dl.acm.org/doi/10.1145/1807128.1807152
+1. Benchbase Repository: https://github.com/cmu-db/benchbase/wiki/TPC-C
+1. OLTP-Bench: An Extensible Testbed for Benchmarking Relational Databases: http://www.vldb.org/pvldb/vol7/p277-difallah.pdf
+1. Orchestrating DBMS Benchmarking in the Cloud with Kubernetes: https://doi.org/10.1007/978-3-030-94437-7_6
+1. A Cloud-Native Adoption of Classical DBMS Performance Benchmarks and Tools: https://doi.org/10.1007/978-3-031-68031-1_9
+
+
+## Types of DBMS
+
+### Managed by Bexhoma
+
+We included [CockroachDB](Example-CockroachDB) as an example of a distributed DBMS that is managed by bexhoma.
+
+Advantages
+* Bexhoma can monitor all components
+* Bexhoma knows about loaded databases, their status and timings
+
+Disadvantages
+* This is only implemented for some examples
+* This cannot be applied to Cloud services (or any other database outside of the Kubernetes cluster)
+
+### Not Managed by Bexhoma
+
+We included [YugaByteDB](Example-YugaByteDB) as an example of a distributed DBMS that is not managed by bexhoma, but runs in the same Kubernetes cluster.
+
+Advantages
+* Can be applied to all systems running in Kubernetes
+* Bexhoma can monitor all components
+
+Disadvantages
+* Bexhoma does not necessarily know about loaded databases, their status and timings - they might be affected by outside services
+* This cannot be applied to Cloud services (or any other database outside of the Kubernetes cluster)
+
+### Outside of Kubernetes
+
+Here we present an example of a DBMS that is not managed by bexhoma and might be running outside of the Kubernetes cluster.
+
+Advantages
+* This can be applied to all Cloud services (or any other database outside of the Kubernetes cluster) with a JDBC interface
+
+Disadvantages
+* The SUT cannot be monitored by Bexhoma
+* Bexhoma does not necessarily know about loaded databases, their status and timings - they might be affected by outside services
+
+#### PostgreSQL-compatible Cloud Service - Test Environment Placeholder
+
+In order to have a test environment, we install a placeholder instance of PostgreSQL and treat it like an external service.
+
+Create the test placeholder:
+```bash
+# start database service
+kubectl create -f k8s/deploymenttemplate-PostgreSQLService.yml
+```
+
+This starts a deployment `bexhoma-deployment-postgres` with a service `bexhoma-service`.
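+
+Before running experiments against it, we can check that the placeholder (or any real service endpoint) answers.
+A minimal sketch using psycopg2 - host and credentials are placeholders; for the in-cluster placeholder, forward the service first via `kubectl port-forward svc/bexhoma-service 5432:5432`:
+```python
+# Minimal connectivity check against a PostgreSQL-compatible service.
+# Host, user and password are placeholders - substitute your endpoint.
+import psycopg2
+
+connection = psycopg2.connect(
+    host="localhost",   # port-forwarded placeholder, or e.g. mydatabase.example.com
+    port=5432,
+    dbname="postgres",
+    user="postgres",
+    password="",
+)
+with connection.cursor() as cursor:
+    cursor.execute("SELECT version()")
+    print(cursor.fetchone()[0])   # e.g. 'PostgreSQL 16.1 ...'
+connection.close()
+```
+
+If this succeeds, the placeholder behaves like any external PostgreSQL-compatible endpoint.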
+We can delete these after the experiment has finished by
+```bash
+# delete database service
+kubectl delete deployment bexhoma-deployment-postgres
+kubectl delete svc bexhoma-service
+```
+
+All demonstration and test runs in the following are run against this placeholder.
+
+
+## Perform YCSB Benchmark - Ingestion of Data Included
+
+You will have to change the node selectors (to names of nodes that exist in your cluster), or leave out the corresponding parameters:
+```bash
+BEXHOMA_NODE_SUT="cl-worker11"
+BEXHOMA_NODE_LOAD="cl-worker19"
+BEXHOMA_NODE_BENCHMARK="cl-worker19"
+LOG_DIR="./logs_tests"
+
+mkdir -p $LOG_DIR
+```
+
+For performing the experiment we can run the [ycsb file](https://github.com/Beuth-Erdelt/Benchmark-Experiment-Host-Manager/blob/master/ycsb.py).
+
+Example:
+```bash
+nohup python ycsb.py -ms 2 -tr \
+    -sf 1 \
+    -sfo 1 \
+    --workload a \
+    -dbms DatabaseService \
+    -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK \
+    -tb 16384 \
+    -nlp 8 \
+    -nlt 64 \
+    -nlf 4 \
+    -nbp 1 \
+    -nbt 64 \
+    -nbf 4 \
+    -ne 1 \
+    -nc 1 \
+    run $LOG_DIR/doc_ycsb_databaseservice_1.log &
+```
+
+This
+* loops over `n` in [8] and `t` in [4]
+  * starts a clean instance of a dummy container as a placeholder for the DatabaseService (`-dbms`)
+    * data directory inside a Docker container
+    * creates YCSB schema in each database
+  * starts `n` loader pods per DBMS
+    * with a loading container each
+    * threads = 64/`n` (`-nlt`)
+    * target throughput is `t` * 16384
+    * generates YCSB data = 1.000.000 rows (i.e., SF=1, `-sf`)
+    * imports it into the DBMS
+  * loops over `m` in [1] and `s` in [4]
+    * runs `m` parallel streams of YCSB queries per DBMS
+      * 1.000.000 operations (`-sfo`)
+      * workload A = 50% read / 50% write (`--workload`)
+      * target throughput is `s` * 16384
+      * threads = 64/`m` (`-nbt`)
+  * with a maximum of 1 DBMS at a time (`-ms`) (plus 1 for the placeholder)
+* tests if results match workflow (`-tr`)
+* shows a summary
+
+### Status
+
+You can watch the status while the benchmark is running via `bexperiments status`
+
+```
+Dashboard: Running
+Cluster Prometheus: Running
+Message Queue: Running
+Data directory: Running
+Result directory: Running
++----------------------------+--------------+--------------+------------+---------------+
+| 1730133803                 | sut          | loaded [s]   | use case   | benchmarker   |
++============================+==============+==============+============+===============+
+| DatabaseService-64-8-65536 | (1. Running) | 41           | ycsb       | (1. Running)  |
++----------------------------+--------------+--------------+------------+---------------+
+```
+
+The code `1730133803` is the unique identifier of the experiment.
+You can also find the number in the output of `ycsb.py`.
+
+### Cleanup
+
+After finishing, the script is supposed to clean up and remove everything from the cluster that is related to the experiment.
+If something goes wrong, you can also clean up manually with `bexperiments stop` (removes everything) or `bexperiments stop -e 1730133803` (removes everything that is related to experiment `1730133803`).
+
+### Evaluate Results
+
+At the end of a benchmark you will see a summary like
+
+```bash
+## Show Summary
+
+### Workload
+YCSB SF=1
+    Type: ycsb
+    Duration: 354s
+    Code: 1734663459
+    YCSB tool runs the benchmark.
+    This experiment compares run time and resource consumption of YCSB queries.
+    Workload is 'A'. Number of rows to insert is 1000000. Number of operations is 1000000. Batch size is ''.
+ YCSB is performed using several threads and processes. Target is based on multiples of '16384'. Factors for loading are [4]. Factors for benchmarking are [4]. + Benchmark is limited to DBMS ['DatabaseService']. + Import is handled by 8 processes (pods). + Loading is fixed to cl-worker19. + Benchmarking is fixed to cl-worker19. + SUT is fixed to cl-worker11. + Loading is tested with [64] threads, split into [8] pods. + Benchmarking is tested with [64] threads, split into [1] pods. + Benchmarking is run as [1] times the number of benchmarking pods. + Experiment is run once. + +### Connections +DatabaseService-64-8-65536-1 uses docker image postgres:16.1 + RAM:541008576512 + CPU:AMD Opteron(tm) Processor 6378 + Cores:64 + host:5.15.0-126-generic + node:cl-worker11 + disk:249256012 + datadisk:39348 + requests_cpu:4 + requests_memory:16Gi + +### Loading + experiment_run threads target pod_count [OVERALL].Throughput(ops/sec) [OVERALL].RunTime(ms) [INSERT].Return=OK [INSERT].99thPercentileLatency(us) +DatabaseService-64-8-65536 1 64 65536 8 49973.150502 20251.0 1000000 25397.0 + +### Execution + experiment_run threads target pod_count [OVERALL].Throughput(ops/sec) [OVERALL].RunTime(ms) [READ].Return=OK [READ].99thPercentileLatency(us) [UPDATE].Return=OK [UPDATE].99thPercentileLatency(us) +DatabaseService-64-8-65536-1 1 64 65536 1 55202.87 18115.0 499487 2095.0 500513 42239.0 + +### Workflow + +#### Actual +DBMS DatabaseService-64-8-65536 - Pods [[1]] + +#### Planned +DBMS DatabaseService-64-8-65536 - Pods [[1]] + +### Tests +TEST passed: [OVERALL].Throughput(ops/sec) contains no 0 or NaN +TEST passed: [OVERALL].Throughput(ops/sec) contains no 0 or NaN +TEST passed: Workflow as planned +``` + +To see the summary again you can simply call `bexperiments summary -e 1730133803` with the experiment code. + +### Detailed Evaluation + +Results are transformed into pandas DataFrames and can be inspected in detail. +See for example +* [Jupyter Notebooks](https://github.com/Beuth-Erdelt/Benchmark-Experiment-Host-Manager/tree/master/images/evaluator_dbmsbenchmarker/notebooks/) + +You can connect to an evaluation server locally by `bexperiments jupyter`. +This forwards ports, so you have +* a Jupyter notebook server at http://localhost:8888 + + +## Perform YCSB Benchmark - Execution only + +For performing the experiment we can run the [ycsb file](https://github.com/Beuth-Erdelt/Benchmark-Experiment-Host-Manager/blob/master/ycsb.py). + +Example: +```bash +nohup python ycsb.py -ms 2 -tr \ + -sf 1 \ + -sfo 10 \ + --workload a \ + -dbms DatabaseService \ + -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK \ + -tb 16384 \ + -nlp 8 \ + -nlt 64 \ + -nlf 4 \ + -nbp 1 \ + -nbt 64 \ + -nbf 4 \ + -ne 1 \ + -nc 1 \ + -m -mc \ + -sl \ + run $LOG_DIR/doc_ycsb_databaseservice_2.log & +``` + +This skips loading (`-sl`), as data is already present in the database. + +```bash +## Show Summary + +### Workload +TPC-H Queries SF=3 + Type: tpch + Duration: 262s + Code: 1734667671 + This includes the reading queries of TPC-H. + This experiment compares run time and resource consumption of TPC-H queries in different DBMS. + TPC-H (SF=3) data is loaded and benchmark is executed. + Query ordering is Q1 - Q22. + All instances use the same query parameters. + Timeout per query is 1200. + Import sets indexes and constraints after loading and recomputes statistics. + System metrics are monitored by a cluster-wide installation. + Benchmark is limited to DBMS ['DatabaseService']. 
+ Import is handled by 8 processes (pods). + Loading is fixed to cl-worker19. + Benchmarking is fixed to cl-worker19. + SUT is fixed to cl-worker11. + Loading is skipped. + Loading is tested with [8] threads, split into [8] pods. + Benchmarking is tested with [1] threads, split into [1] pods. + Benchmarking is run as [1] times the number of benchmarking pods. + Experiment is run once. + +### Connections +DatabaseService-BHT-8-1-1 uses docker image postgres:16.1 + RAM:541008576512 + CPU:AMD Opteron(tm) Processor 6378 + Cores:64 + host:5.15.0-126-generic + node:cl-worker11 + disk:249256216 + datadisk:39348 + requests_cpu:4 + requests_memory:16Gi + +### Errors (failed queries) +No errors + +### Warnings (result mismatch) +No warnings + +### Latency of Timer Execution [ms] +DBMS DatabaseService-BHT-8-1-1 +Pricing Summary Report (TPC-H Q1) 6149.40 +Minimum Cost Supplier Query (TPC-H Q2) 2110.09 +Shipping Priority (TPC-H Q3) 2435.87 +Order Priority Checking Query (TPC-H Q4) 3075.44 +Local Supplier Volume (TPC-H Q5) 2234.95 +Forecasting Revenue Change (TPC-H Q6) 1171.11 +Forecasting Revenue Change (TPC-H Q7) 2288.60 +National Market Share (TPC-H Q8) 1388.84 +Product Type Profit Measure (TPC-H Q9) 3168.23 +Forecasting Revenue Change (TPC-H Q10) 3075.63 +Important Stock Identification (TPC-H Q11) 563.29 +Shipping Modes and Order Priority (TPC-H Q12) 2453.85 +Customer Distribution (TPC-H Q13) 6242.59 +Forecasting Revenue Change (TPC-H Q14) 1271.74 +Top Supplier Query (TPC-H Q15) 1382.80 +Parts/Supplier Relationship (TPC-H Q16) 1349.50 +Small-Quantity-Order Revenue (TPC-H Q17) 5621.15 +Large Volume Customer (TPC-H Q18) 18750.06 +Discounted Revenue (TPC-H Q19) 1919.85 +Potential Part Promotion (TPC-H Q20) 1131.92 +Suppliers Who Kept Orders Waiting Query (TPC-H Q21) 2704.33 +Global Sales Opportunity Query (TPC-H Q22) 444.20 + +### Loading [s] + timeGenerate timeIngesting timeSchema timeIndex timeLoad +DatabaseService-BHT-8-1-1 0 0 0 0 0 + +### Geometric Mean of Medians of Timer Run [s] + Geo Times [s] +DBMS +DatabaseService-BHT-8-1-1 2.29 + +### Power@Size + Power@Size [~Q/h] +DBMS +DatabaseService-BHT-8-1-1 4850.83 + +### Throughput@Size + time [s] count SF Throughput@Size [~GB/h] +DBMS SF num_experiment num_client +DatabaseService-BHT-8-1 3 1 1 76 1 3 3126.32 + +### Workflow + +#### Actual +DBMS DatabaseService-BHT-8 - Pods [[1]] + +#### Planned +DBMS DatabaseService-BHT-8 - Pods [[1]] + +### Execution - Benchmarker + CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] +DatabaseService-BHT-8-1 11.36 0 0.22 0.24 + +### Tests +TEST passed: Geo Times [s] contains no 0 or NaN +TEST passed: Power@Size [~Q/h] contains no 0 or NaN +TEST passed: Throughput@Size [~GB/h] contains no 0 or NaN +TEST passed: No SQL errors +TEST passed: No SQL warnings +TEST passed: Execution Benchmarker contains no 0 or NaN in CPU [CPUs] +TEST passed: Workflow as planned +``` + + +## Monitoring + +[Monitoring](Monitoring.html) can be activated for DBMS only (`-m`) or for all components (`-mc`). + +The `-mc` option is mandatory here: The sidecar container approach is not working (since bexhoma does not manage the deployment), so either you have Prometheus / Node exporter already installed in your cluster or a daemonset is needed. +Moreover the SUT itself cannot be monitored, since it is outside of the cluster. +For further explanation see the monitoring section of this documentation. + + +## Use Persistent Storage + +### Bexhoma Status Volume + +Persistent Storage is not managed by bexhoma, but by the Cloud service. 
+We can add the request for a PVC to the experiment setup:
+
+```bash
+nohup python ycsb.py -ms 2 -tr \
+    -sf 5 \
+    -sfo 10 \
+    --workload a \
+    -dbms DatabaseService \
+    -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK \
+    -tb 16384 \
+    -nlp 8 \
+    -nlt 64 \
+    -nlf 4 \
+    -nbp 1 \
+    -nbt 64 \
+    -nbf 4 \
+    -ne 1 \
+    -nc 1 \
+    -m -mc \
+    -rst shared -rss 1Gi \
+    run $LOG_DIR/doc_ycsb_databaseservice_3.log &
+```
+
+This will add a PVC to the Dummy DBMS.
+Nothing will be stored there, but it maintains status information about previous loading processes.
+
+```
++----------------------------------------+-----------------+--------------+--------------+-------------------+-----------------+----------------------+-----------+----------+--------+--------+
+| Volumes                                | configuration   | experiment   | loaded [s]   | timeLoading [s]   | dbms            | storage_class_name   | storage   | status   | size   | used   |
++========================================+=================+==============+==============+===================+=================+======================+===========+==========+========+========+
+| bexhoma-storage-databaseservice-ycsb-1 | databaseservice | ycsb-1       | True         | 65                | DatabaseService | shared               | 1Gi       | Bound    | 1.0G   | 36M    |
++----------------------------------------+-----------------+--------------+--------------+-------------------+-----------------+----------------------+-----------+----------+--------+--------+
+```
+
+The above means there has been a YCSB loading process (managed by bexhoma) of size SF=1 that has been completed.
+All following calls of such an experiment will skip loading, since the PVC indicates that loading has finished.
+This makes the `-sl` parameter unnecessary.
+
+However, bexhoma cannot verify this information.
+If data is deleted somehow, the PVC information will be outdated and wrong.
+
+This approach helps bexhoma persist status information, but it does not persist data inside the Cloud database.
+
+```bash
+## Show Summary
+
+### Workload
+YCSB SF=5
+    Type: ycsb
+    Duration: 3718s
+    Code: 1734700853
+    YCSB tool runs the benchmark.
+    This experiment compares run time and resource consumption of YCSB queries.
+    Workload is 'A'. Number of rows to insert is 5000000. Number of operations is 10000000. Batch size is ''.
+    YCSB is performed using several threads and processes. Target is based on multiples of '16384'. Factors for loading are [4]. Factors for benchmarking are [4].
+    System metrics are monitored by a cluster-wide installation.
+    Benchmark is limited to DBMS ['DatabaseService'].
+    Import is handled by 8 processes (pods).
+    Loading is fixed to cl-worker19.
+    Benchmarking is fixed to cl-worker19.
+    SUT is fixed to cl-worker11.
+    Database is persisted to disk of type shared and size 1Gi.
+    Loading is tested with [64] threads, split into [8] pods.
+    Benchmarking is tested with [64] threads, split into [1] pods.
+    Benchmarking is run as [1] times the number of benchmarking pods.
+    Experiment is run once.
+
+### Connections
+DatabaseService-64-8-65536-1 uses docker image postgres:16.1
+    RAM:541008576512
+    CPU:AMD Opteron(tm) Processor 6378
+    Cores:64
+    host:5.15.0-126-generic
+    node:cl-worker11
+    disk:250060572
+    datadisk:39192
+    volume_size:1.0G
+    volume_used:36M
+    requests_cpu:4
+    requests_memory:16Gi
+
+### Loading
+                           experiment_run  threads  target  pod_count  [OVERALL].Throughput(ops/sec)  [OVERALL].RunTime(ms)  [INSERT].Return=OK  [INSERT].99thPercentileLatency(us)
+DatabaseService-64-8-65536              1       64   65536          8                   34656.857878               145263.0             5000000                              6815.5
+
+### Execution
+                             experiment_run  threads  target  pod_count  [OVERALL].Throughput(ops/sec)  [OVERALL].RunTime(ms)  [READ].Return=OK  [READ].99thPercentileLatency(us)  [UPDATE].Return=OK  [UPDATE].99thPercentileLatency(us)
+DatabaseService-64-8-65536-1              1       64   65536          1                        65353.5               153014.0           4997965                             653.0             5002035                              1296.0
+
+### Workflow
+
+#### Actual
+DBMS DatabaseService-64-8-65536 - Pods [[1]]
+
+#### Planned
+DBMS DatabaseService-64-8-65536 - Pods [[1]]
+
+### Ingestion - Loader
+                              CPU [CPUs]  Max CPU  Max RAM [Gb]  Max RAM Cached [Gb]
+DatabaseService-64-8-65536-1      397.12     0.88           4.6                 4.62
+
+### Execution - Benchmarker
+                              CPU [CPUs]  Max CPU  Max RAM [Gb]  Max RAM Cached [Gb]
+DatabaseService-64-8-65536-1      743.75     5.11           0.6                 0.61
+
+### Tests
+TEST passed: [OVERALL].Throughput(ops/sec) contains no 0 or NaN
+TEST passed: [OVERALL].Throughput(ops/sec) contains no 0 or NaN
+TEST passed: Ingestion Loader contains no 0 or NaN in CPU [CPUs]
+TEST passed: Execution Benchmarker contains no 0 or NaN in CPU [CPUs]
+TEST passed: Workflow as planned
+```
+
+## YCSB Example Explained
+
+
+### Configuration of Bexhoma
+
+In `cluster.config` there is a section:
+
+```
+'DatabaseService': {
+    'loadData': 'psql -U postgres --host mydatabase.example.com --port 5432 < {scriptname}',
+    'template': {
+        'version': 'v1234',
+        'alias': 'Cloud-A',
+        'docker_alias': 'CL-A',
+        'JDBC': {
+            'driver': "org.postgresql.Driver",
+            'auth': ["postgres", ""],
+            'url': 'jdbc:postgresql://mydatabase.example.com:5432/postgres?reWriteBatchedInserts=true',
+            'jar': 'postgresql-42.5.0.jar'
+        }
+    },
+    'logfile': '/usr/local/data/logfile',
+    'datadir': '/var/lib/postgresql/data/',
+    'priceperhourdollar': 0.0,
+},
+```
+
+where
+* `loadData`: This command is used to create the schema
+* `JDBC`: This information is used to configure YCSB
+
+Please make sure to adjust this to the cloud service you want to benchmark.
+
+
+### Preparation of YCSB
+
+In the Docker files for YCSB
+* https://github.com/Beuth-Erdelt/Benchmark-Experiment-Host-Manager/blob/master/images/ycsb/generator/Dockerfile
+* https://github.com/Beuth-Erdelt/Benchmark-Experiment-Host-Manager/blob/master/images/ycsb/benchmarker/Dockerfile
+
+there is a section about including the needed JDBC driver:
+```
+######### Specific version of PostgreSQL JDBC #########
+RUN wget https://jdbc.postgresql.org/download/postgresql-42.5.0.jar --no-check-certificate
+RUN cp postgresql-42.5.0.jar jars/postgresql-42.5.0.jar
+```
+
+
+### Dummy SUT
+
+Bexhoma deploys a pod to carry status information.
+Here it is an instance of PostgreSQL: https://github.com/Beuth-Erdelt/Benchmark-Experiment-Host-Manager/blob/master/k8s/deploymenttemplate-DatabaseService.yml
+
+
+### Schema SQL File
+
+If data should be loaded, bexhoma first creates a schema according to: https://github.com/Beuth-Erdelt/Benchmark-Experiment-Host-Manager/tree/master/experiments/ycsb/DatabaseService
+
+
+### Workflow of YCSB
+
+In `ycsb.py` there is a section about DatabaseService.
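+
+The relevant branch looks roughly like the following condensed sketch - assuming it mirrors the `benchbase.py` branch shown earlier in this diff; the class and configuration names here are illustrative, and the surrounding variables (`args`, `experiment`, `loading_pods`, ...) come from the script itself:
+```python
+# Hypothetical, condensed sketch of the DatabaseService branch in ycsb.py.
+if "DatabaseService" in args.dbms:
+    config = configurations.ycsb(experiment=experiment, docker='DatabaseService',
+                                 configuration='DatabaseService-64-8-65536',
+                                 alias='DatabaseService')   # names are placeholders
+    config.monitoring_sut = False    # SUT is outside of K8s and cannot be scraped
+    if skip_loading:
+        config.loading_deactivated = True
+    config.set_loading_parameters(
+        PARALLEL = str(loading_pods),
+        BEXHOMA_DATABASE = 'postgres',
+        BEXHOMA_HOST = 'bexhoma-service.perdelt.svc.cluster.local',  # service endpoint
+    )
+    config.set_loading(parallel=loading_pods, num_pods=loading_pods)
+```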
+
+Watch for
+* `config.monitoring_sut = False`: SUT cannot be monitored since it is outside of K8s
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+## Benchbase's TPC-C
+
+```bash
+nohup python benchbase.py -ms 2 -tr \
+    -sf 16 \
+    -sd 5 \
+    -dbms DatabaseService \
+    -nbp 1,2 \
+    -nbt 16 \
+    -nbf 16 \
+    -tb 1024 \
+    -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK \
+    run $LOG_DIR/doc_benchbase_databaseservice_1.log &
+```
+
+yields
+
+```bash
+## Show Summary
+
+### Workload
+Benchbase Workload SF=16 (warehouses for TPC-C)
+    Type: benchbase
+    Duration: 1091s
+    Code: 1734664810
+    Benchbase runs the benchmark.
+    This experiment compares run time and resource consumption of Benchbase queries in different DBMS.
+    Benchbase data is generated and loaded using several threads.
+    Benchmark is 'tpcc'. Scaling factor (e.g., number of warehouses) is 16. Benchmarking runs for 5 minutes. Target is based on multiples of '1024'. Factors for benchmarking are [16].
+    Benchmark is limited to DBMS ['DatabaseService'].
+    Import is handled by 1 processes (pods).
+    Loading is fixed to cl-worker19.
+    Benchmarking is fixed to cl-worker19.
+    SUT is fixed to cl-worker11.
+    Loading is tested with [1] threads, split into [1] pods.
+    Benchmarking is tested with [16] threads, split into [1, 2] pods.
+    Benchmarking is run as [1] times the number of benchmarking pods.
+    Experiment is run once.
+
+### Connections
+DatabaseService-1-1-1024-1 uses docker image postgres:16.1
+    RAM:541008576512
+    CPU:AMD Opteron(tm) Processor 6378
+    Cores:64
+    host:5.15.0-126-generic
+    node:cl-worker11
+    disk:249256016
+    datadisk:39348
+    requests_cpu:4
+    requests_memory:16Gi
+DatabaseService-1-1-1024-2 uses docker image postgres:16.1
+    RAM:541008576512
+    CPU:AMD Opteron(tm) Processor 6378
+    Cores:64
+    host:5.15.0-126-generic
+    node:cl-worker11
+    disk:249256020
+    datadisk:39348
+    requests_cpu:4
+    requests_memory:16Gi
+
+### Execution
+                            experiment_run  terminals  target  pod_count   time  Throughput (requests/second)  Latency Distribution.95th Percentile Latency (microseconds)  Latency Distribution.Average Latency (microseconds)
+DatabaseService-1-1-1024-1               1         16   16384          1  300.0                       1873.16                                                       19246.0                                                8535.0
+DatabaseService-1-1-1024-2               1         16   16384          2  300.0                       1820.81                                                       21236.0                                                8782.5
+
+Warehouses: 16
+
+### Workflow
+
+#### Actual
+DBMS DatabaseService-1-1-1024 - Pods [[1, 2]]
+
+#### Planned
+DBMS DatabaseService-1-1-1024 - Pods [[1, 2]]
+
+### Loading
+                            time_load  terminals  pods  Imported warehouses [1/h]
+DatabaseService-1-1-1024-1      150.0        1.0   1.0                      384.0
+DatabaseService-1-1-1024-2      150.0        1.0   2.0                      384.0
+
+### Tests
+TEST passed: Throughput (requests/second) contains no 0 or NaN
+TEST passed: Workflow as planned
+```
+
+### Benchbase Example Explained
+
+The setup is the same as for YCSB (see above).
+
+However, the connection string is this time not only read from `cluster.config` but also constructed from parameters that are set explicitly in the workflow file `benchbase.py`:
+
+```
+BENCHBASE_PROFILE = 'postgres',
+BEXHOMA_DATABASE = 'postgres',
+```
+
+### Execution Only
+
+This time we skip loading (`-sl`), since the database is already present.
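+To double-check that the database really contains the TPC-C data before skipping ingestion, a quick count against the service can help (a sketch: it assumes the endpoint and credentials from `cluster.config` above and the standard Benchbase TPC-C table names):
+
+```bash
+# Count the warehouses in the cloud database (connection settings as in cluster.config):
+psql -U postgres --host mydatabase.example.com --port 5432 -c "SELECT COUNT(*) FROM warehouse;"
+```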
+ +```bash +nohup python benchbase.py -ms 2 -tr \ + -sf 16 \ + -sd 5 \ + -dbms DatabaseService \ + -nbp 1,2 \ + -nbt 16 \ + -nbf 16 \ + -tb 1024 \ + -sl \ + -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK \ + run $LOG_DIR/doc_benchbase_databaseservice_2.log & +``` + +yields + +```bash +## Show Summary + +### Workload +Benchbase Workload SF=16 (warehouses for TPC-C) + Type: benchbase + Duration: 814s + Code: 1734665950 + Benchbase runs the benchmark. + This experiment compares run time and resource consumption of Benchbase queries in different DBMS. + Benchbase data is generated and loaded using several threads. + Benchmark is 'tpcc'. Scaling factor (e.g., number of warehouses) is 16. Benchmarking runs for 5 minutes. Target is based on multiples of '1024'. Factors for benchmarking are [16]. + Benchmark is limited to DBMS ['DatabaseService']. + Import is handled by 1 processes (pods). + Loading is fixed to cl-worker19. + Benchmarking is fixed to cl-worker19. + SUT is fixed to cl-worker11. + Loading is skipped. + Loading is tested with [1] threads, split into [1] pods. + Benchmarking is tested with [16] threads, split into [1, 2] pods. + Benchmarking is run as [1] times the number of benchmarking pods. + Experiment is run once. + +### Connections +DatabaseService-1-1-1024-1 uses docker image postgres:16.1 + RAM:541008576512 + CPU:AMD Opteron(tm) Processor 6378 + Cores:64 + host:5.15.0-126-generic + node:cl-worker11 + disk:249256028 + datadisk:39348 + requests_cpu:4 + requests_memory:16Gi +DatabaseService-1-1-1024-2 uses docker image postgres:16.1 + RAM:541008576512 + CPU:AMD Opteron(tm) Processor 6378 + Cores:64 + host:5.15.0-126-generic + node:cl-worker11 + disk:249256028 + datadisk:39348 + requests_cpu:4 + requests_memory:16Gi + +### Execution + experiment_run terminals target pod_count time Throughput (requests/second) Latency Distribution.95th Percentile Latency (microseconds) Latency Distribution.Average Latency (microseconds) +DatabaseService-1-1-1024-1 1 16 16384 1 300.0 1948.43 18329.0 8206.0 +DatabaseService-1-1-1024-2 1 16 16384 2 300.0 1774.67 21116.0 9008.5 + +Warehouses: 16 + +### Workflow + +#### Actual +DBMS DatabaseService-1-1-1024 - Pods [[1, 2]] + +#### Planned +DBMS DatabaseService-1-1-1024 - Pods [[1, 2]] + +### Loading + time_load terminals pods Imported warehouses [1/h] +DatabaseService-1-1-1024-1 0 1 1 inf +DatabaseService-1-1-1024-2 0 1 2 inf + +### Tests +TEST passed: Throughput (requests/second) contains no 0 or NaN +TEST passed: Workflow as planned +``` + + + + +## TPC-H + +In the following we run TPC-H against the Cloud Database Service. + + +### Simple Run + +At first we run a simple power test against SF=3. +Components are monitored. + +```bash +nohup python tpch.py -ms 2 -dt -tr \ + -dbms DatabaseService \ + -nlp 8 \ + -nlt 8 \ + -sf 3 \ + -ii -ic -is \ + -t 1200 \ + -m -mc \ + -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK \ + run $LOG_DIR/doc_tpch_testcase_databaseservice_1.log & +``` + +yields + +```bash +## Show Summary + +### Workload +TPC-H Queries SF=3 + Type: tpch + Duration: 765s + Code: 1734666830 + This includes the reading queries of TPC-H. + This experiment compares run time and resource consumption of TPC-H queries in different DBMS. + TPC-H (SF=3) data is loaded and benchmark is executed. + Query ordering is Q1 - Q22. + All instances use the same query parameters. + Timeout per query is 1200. + Import sets indexes and constraints after loading and recomputes statistics. 
+ System metrics are monitored by a cluster-wide installation. + Benchmark is limited to DBMS ['DatabaseService']. + Import is handled by 8 processes (pods). + Loading is fixed to cl-worker19. + Benchmarking is fixed to cl-worker19. + SUT is fixed to cl-worker11. + Loading is tested with [8] threads, split into [8] pods. + Benchmarking is tested with [1] threads, split into [1] pods. + Benchmarking is run as [1] times the number of benchmarking pods. + Experiment is run once. + +### Connections +DatabaseService-BHT-8-1-1 uses docker image postgres:16.1 + RAM:541008576512 + CPU:AMD Opteron(tm) Processor 6378 + Cores:64 + host:5.15.0-126-generic + node:cl-worker11 + disk:249256232 + datadisk:39348 + requests_cpu:4 + requests_memory:16Gi + +### Errors (failed queries) +No errors + +### Warnings (result mismatch) +No warnings + +### Latency of Timer Execution [ms] +DBMS DatabaseService-BHT-8-1-1 +Pricing Summary Report (TPC-H Q1) 6184.88 +Minimum Cost Supplier Query (TPC-H Q2) 2121.70 +Shipping Priority (TPC-H Q3) 2477.89 +Order Priority Checking Query (TPC-H Q4) 3120.41 +Local Supplier Volume (TPC-H Q5) 2263.59 +Forecasting Revenue Change (TPC-H Q6) 1158.38 +Forecasting Revenue Change (TPC-H Q7) 2326.20 +National Market Share (TPC-H Q8) 1410.37 +Product Type Profit Measure (TPC-H Q9) 3187.17 +Forecasting Revenue Change (TPC-H Q10) 3063.93 +Important Stock Identification (TPC-H Q11) 561.36 +Shipping Modes and Order Priority (TPC-H Q12) 2457.12 +Customer Distribution (TPC-H Q13) 6562.69 +Forecasting Revenue Change (TPC-H Q14) 1280.33 +Top Supplier Query (TPC-H Q15) 1396.18 +Parts/Supplier Relationship (TPC-H Q16) 1346.35 +Small-Quantity-Order Revenue (TPC-H Q17) 5626.89 +Large Volume Customer (TPC-H Q18) 19220.56 +Discounted Revenue (TPC-H Q19) 1909.47 +Potential Part Promotion (TPC-H Q20) 1216.54 +Suppliers Who Kept Orders Waiting Query (TPC-H Q21) 2784.81 +Global Sales Opportunity Query (TPC-H Q22) 465.76 + +### Loading [s] + timeGenerate timeIngesting timeSchema timeIndex timeLoad +DatabaseService-BHT-8-1-1 1.0 97.0 1.0 216.0 322.0 + +### Geometric Mean of Medians of Timer Run [s] + Geo Times [s] +DBMS +DatabaseService-BHT-8-1-1 2.32 + +### Power@Size + Power@Size [~Q/h] +DBMS +DatabaseService-BHT-8-1-1 4783.05 + +### Throughput@Size + time [s] count SF Throughput@Size [~GB/h] +DBMS SF num_experiment num_client +DatabaseService-BHT-8-1 3 1 1 77 1 3 3085.71 + +### Workflow + +#### Actual +DBMS DatabaseService-BHT-8 - Pods [[1]] + +#### Planned +DBMS DatabaseService-BHT-8 - Pods [[1]] + +### Ingestion - Loader + CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] +DatabaseService-BHT-8-1 30.9 0.21 0.03 2.27 + +### Execution - Benchmarker + CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] +DatabaseService-BHT-8-1 11.41 0 0.23 0.24 + +### Tests +TEST passed: Geo Times [s] contains no 0 or NaN +TEST passed: Power@Size [~Q/h] contains no 0 or NaN +TEST passed: Throughput@Size [~GB/h] contains no 0 or NaN +TEST passed: No SQL errors +TEST passed: No SQL warnings +TEST passed: Ingestion Loader contains no 0 or NaN in CPU [CPUs] +TEST passed: Execution Benchmarker contains no 0 or NaN in CPU [CPUs] +TEST passed: Workflow as planned +``` + +### Execution Only + +Now loading is skipped (`-sl`) as data is already present in the Cloud system. 
+ +```bash +nohup python tpch.py -ms 2 -dt -tr \ + -dbms DatabaseService \ + -nlp 8 \ + -nlt 8 \ + -sf 3 \ + -ii -ic -is \ + -t 1200 \ + -m -mc \ + -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK \ + -sl \ + run $LOG_DIR/doc_tpch_testcase_databaseservice_2.log & +``` + +yields + +```bash +## Show Summary + +### Workload +TPC-H Queries SF=3 + Type: tpch + Duration: 262s + Code: 1734667671 + This includes the reading queries of TPC-H. + This experiment compares run time and resource consumption of TPC-H queries in different DBMS. + TPC-H (SF=3) data is loaded and benchmark is executed. + Query ordering is Q1 - Q22. + All instances use the same query parameters. + Timeout per query is 1200. + Import sets indexes and constraints after loading and recomputes statistics. + System metrics are monitored by a cluster-wide installation. + Benchmark is limited to DBMS ['DatabaseService']. + Import is handled by 8 processes (pods). + Loading is fixed to cl-worker19. + Benchmarking is fixed to cl-worker19. + SUT is fixed to cl-worker11. + Loading is skipped. + Loading is tested with [8] threads, split into [8] pods. + Benchmarking is tested with [1] threads, split into [1] pods. + Benchmarking is run as [1] times the number of benchmarking pods. + Experiment is run once. + +### Connections +DatabaseService-BHT-8-1-1 uses docker image postgres:16.1 + RAM:541008576512 + CPU:AMD Opteron(tm) Processor 6378 + Cores:64 + host:5.15.0-126-generic + node:cl-worker11 + disk:249256216 + datadisk:39348 + requests_cpu:4 + requests_memory:16Gi + +### Errors (failed queries) +No errors + +### Warnings (result mismatch) +No warnings + +### Latency of Timer Execution [ms] +DBMS DatabaseService-BHT-8-1-1 +Pricing Summary Report (TPC-H Q1) 6149.40 +Minimum Cost Supplier Query (TPC-H Q2) 2110.09 +Shipping Priority (TPC-H Q3) 2435.87 +Order Priority Checking Query (TPC-H Q4) 3075.44 +Local Supplier Volume (TPC-H Q5) 2234.95 +Forecasting Revenue Change (TPC-H Q6) 1171.11 +Forecasting Revenue Change (TPC-H Q7) 2288.60 +National Market Share (TPC-H Q8) 1388.84 +Product Type Profit Measure (TPC-H Q9) 3168.23 +Forecasting Revenue Change (TPC-H Q10) 3075.63 +Important Stock Identification (TPC-H Q11) 563.29 +Shipping Modes and Order Priority (TPC-H Q12) 2453.85 +Customer Distribution (TPC-H Q13) 6242.59 +Forecasting Revenue Change (TPC-H Q14) 1271.74 +Top Supplier Query (TPC-H Q15) 1382.80 +Parts/Supplier Relationship (TPC-H Q16) 1349.50 +Small-Quantity-Order Revenue (TPC-H Q17) 5621.15 +Large Volume Customer (TPC-H Q18) 18750.06 +Discounted Revenue (TPC-H Q19) 1919.85 +Potential Part Promotion (TPC-H Q20) 1131.92 +Suppliers Who Kept Orders Waiting Query (TPC-H Q21) 2704.33 +Global Sales Opportunity Query (TPC-H Q22) 444.20 + +### Loading [s] + timeGenerate timeIngesting timeSchema timeIndex timeLoad +DatabaseService-BHT-8-1-1 0 0 0 0 0 + +### Geometric Mean of Medians of Timer Run [s] + Geo Times [s] +DBMS +DatabaseService-BHT-8-1-1 2.29 + +### Power@Size + Power@Size [~Q/h] +DBMS +DatabaseService-BHT-8-1-1 4850.83 + +### Throughput@Size + time [s] count SF Throughput@Size [~GB/h] +DBMS SF num_experiment num_client +DatabaseService-BHT-8-1 3 1 1 76 1 3 3126.32 + +### Workflow + +#### Actual +DBMS DatabaseService-BHT-8 - Pods [[1]] + +#### Planned +DBMS DatabaseService-BHT-8 - Pods [[1]] + +### Execution - Benchmarker + CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] +DatabaseService-BHT-8-1 11.36 0 0.22 0.24 + +### Tests +TEST passed: Geo Times [s] contains no 0 or NaN +TEST passed: Power@Size 
[~Q/h] contains no 0 or NaN
+TEST passed: Throughput@Size [~GB/h] contains no 0 or NaN
+TEST passed: No SQL errors
+TEST passed: No SQL warnings
+TEST passed: Execution Benchmarker contains no 0 or NaN in CPU [CPUs]
+TEST passed: Workflow as planned
+```
+
+
+### Persistent Storage
+
+We now use a PVC to store information about the loading process.
+First, we remove the placeholder and recreate it.
+```bash
+# delete pvc of placeholder
+kubectl delete pvc bexhoma-storage-databaseservice-tpch-3
+
+sleep 10
+
+# delete database service placeholder
+kubectl delete deployment bexhoma-deployment-postgres
+kubectl delete svc bexhoma-service
+
+sleep 30
+
+# start database service placeholder
+kubectl create -f k8s/deploymenttemplate-PostgreSQLService.yml
+
+sleep 10
+```
+
+### Ingestion with Persistent Storage
+
+```bash
+nohup python tpch.py -ms 2 -dt -tr \
+    -dbms DatabaseService \
+    -nlp 8 \
+    -nlt 8 \
+    -sf 3 \
+    -ii -ic -is \
+    -t 1200 \
+    -m -mc \
+    -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK \
+    -rst shared -rss 1Gi \
+    run $LOG_DIR/doc_tpch_testcase_databaseservice_3.log &
+```
+
+yields
+
+```bash
+## Show Summary
+
+### Workload
+TPC-H Queries SF=3
+    Type: tpch
+    Duration: 798s
+    Code: 1734668021
+    This includes the reading queries of TPC-H.
+    This experiment compares run time and resource consumption of TPC-H queries in different DBMS.
+    TPC-H (SF=3) data is loaded and benchmark is executed.
+    Query ordering is Q1 - Q22.
+    All instances use the same query parameters.
+    Timeout per query is 1200.
+    Import sets indexes and constraints after loading and recomputes statistics.
+    System metrics are monitored by a cluster-wide installation.
+    Benchmark is limited to DBMS ['DatabaseService'].
+    Import is handled by 8 processes (pods).
+    Loading is fixed to cl-worker19.
+    Benchmarking is fixed to cl-worker19.
+    SUT is fixed to cl-worker11.
+    Database is persisted to disk of type shared and size 1Gi.
+    Loading is tested with [8] threads, split into [8] pods.
+    Benchmarking is tested with [1] threads, split into [1] pods.
+    Benchmarking is run as [1] times the number of benchmarking pods.
+    Experiment is run once.
+
+### Connections
+DatabaseService-BHT-8-1-1 uses docker image postgres:16.1
+    RAM:541008576512
+    CPU:AMD Opteron(tm) Processor 6378
+    Cores:64
+    host:5.15.0-126-generic
+    node:cl-worker11
+    disk:249216892
+    datadisk:39192
+    volume_size:1.0G
+    volume_used:36M
+    requests_cpu:4
+    requests_memory:16Gi
+
+### Errors (failed queries)
+No errors
+
+### Warnings (result mismatch)
+No warnings
+
+### Latency of Timer Execution [ms]
+DBMS                                                  DatabaseService-BHT-8-1-1
+Pricing Summary Report (TPC-H Q1)                                       5793.75
+Minimum Cost Supplier Query (TPC-H Q2)                                  1922.31
+Shipping Priority (TPC-H Q3)                                            2231.66
+Order Priority Checking Query (TPC-H Q4)                                2835.42
+Local Supplier Volume (TPC-H Q5)                                        2072.36
+Forecasting Revenue Change (TPC-H Q6)                                   1066.68
+Forecasting Revenue Change (TPC-H Q7)                                   2155.03
+National Market Share (TPC-H Q8)                                        1301.26
+Product Type Profit Measure (TPC-H Q9)                                  2787.60
+Forecasting Revenue Change (TPC-H Q10)                                  2791.42
+Important Stock Identification (TPC-H Q11)                               559.95
+Shipping Modes and Order Priority (TPC-H Q12)                           2143.46
+Customer Distribution (TPC-H Q13)                                       5364.32
+Forecasting Revenue Change (TPC-H Q14)                                  1190.47
+Top Supplier Query (TPC-H Q15)                                          1291.81
+Parts/Supplier Relationship (TPC-H Q16)                                 1127.44
+Small-Quantity-Order Revenue (TPC-H Q17)                                4912.21
+Large Volume Customer (TPC-H Q18)                                      16174.10
+Discounted Revenue (TPC-H Q19)                                          1735.84
+Potential Part Promotion (TPC-H Q20)                                    1088.33
+Suppliers Who Kept Orders Waiting Query (TPC-H Q21)                     2474.25
+Global Sales Opportunity Query (TPC-H Q22)                               488.84
+
+### Loading [s]
+                           timeGenerate  timeIngesting  timeSchema  timeIndex  timeLoad
+DatabaseService-BHT-8-1-1           1.0          123.0         1.0      196.0     326.0
+
+### Geometric Mean of Medians of Timer Run [s]
+                           Geo Times [s]
+DBMS
+DatabaseService-BHT-8-1-1           2.11
+
+### Power@Size
+                           Power@Size [~Q/h]
+DBMS
+DatabaseService-BHT-8-1-1            5279.56
+
+### Throughput@Size
+                                                    time [s]  count  SF  Throughput@Size [~GB/h]
+DBMS                    SF num_experiment num_client
+DatabaseService-BHT-8   3  1              1                70      1   3                  3394.29
+
+### Workflow
+
+#### Actual
+DBMS DatabaseService-BHT-8 - Pods [[1]]
+
+#### Planned
+DBMS DatabaseService-BHT-8 - Pods [[1]]
+
+### Ingestion - Loader
+                         CPU [CPUs]  Max CPU  Max RAM [Gb]  Max RAM Cached [Gb]
+DatabaseService-BHT-8-1       41.77     0.04          0.02                  2.8
+
+### Execution - Benchmarker
+                         CPU [CPUs]  Max CPU  Max RAM [Gb]  Max RAM Cached [Gb]
+DatabaseService-BHT-8-1       15.95        0          0.26                 0.27
+
+### Tests
+TEST passed: Geo Times [s] contains no 0 or NaN
+TEST passed: Power@Size [~Q/h] contains no 0 or NaN
+TEST passed: Throughput@Size [~GB/h] contains no 0 or NaN
+TEST passed: No SQL errors
+TEST passed: No SQL warnings
+TEST passed: Ingestion Loader contains no 0 or NaN in CPU [CPUs]
+TEST passed: Execution Benchmarker contains no 0 or NaN in CPU [CPUs]
+TEST passed: Workflow as planned
+```
+
+### Execution Only with Persistent Storage
+
+Data is now present in the database.
+The persistent volume keeps track of this.
+We can rerun the experiment without explicitly skipping ingestion (no `-sl`).
+
+```bash
+nohup python tpch.py -ms 2 -dt -tr \
+    -dbms DatabaseService \
+    -nlp 8 \
+    -nlt 8 \
+    -sf 3 \
+    -ii -ic -is \
+    -t 1200 \
+    -m -mc \
+    -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK \
+    -rst shared -rss 1Gi \
+    run $LOG_DIR/doc_tpch_testcase_databaseservice_4.log &
+```
+
+yields
+
+```bash
+## Show Summary
+
+### Workload
+TPC-H Queries SF=3
+    Type: tpch
+    Duration: 273s
+    Code: 1734668861
+    This includes the reading queries of TPC-H.
+    This experiment compares run time and resource consumption of TPC-H queries in different DBMS.
+ TPC-H (SF=3) data is loaded and benchmark is executed. + Query ordering is Q1 - Q22. + All instances use the same query parameters. + Timeout per query is 1200. + Import sets indexes and constraints after loading and recomputes statistics. + System metrics are monitored by a cluster-wide installation. + Benchmark is limited to DBMS ['DatabaseService']. + Import is handled by 8 processes (pods). + Loading is fixed to cl-worker19. + Benchmarking is fixed to cl-worker19. + SUT is fixed to cl-worker11. + Database is persisted to disk of type shared and size 1Gi. + Loading is tested with [8] threads, split into [8] pods. + Benchmarking is tested with [1] threads, split into [1] pods. + Benchmarking is run as [1] times the number of benchmarking pods. + Experiment is run once. + +### Connections +DatabaseService-BHT-8-1-1 uses docker image postgres:16.1 + RAM:541008576512 + CPU:AMD Opteron(tm) Processor 6378 + Cores:64 + host:5.15.0-126-generic + node:cl-worker11 + disk:249216876 + datadisk:39192 + volume_size:1.0G + volume_used:36M + requests_cpu:4 + requests_memory:16Gi + +### Errors (failed queries) +No errors + +### Warnings (result mismatch) +No warnings + +### Latency of Timer Execution [ms] +DBMS DatabaseService-BHT-8-1-1 +Pricing Summary Report (TPC-H Q1) 5785.36 +Minimum Cost Supplier Query (TPC-H Q2) 1938.19 +Shipping Priority (TPC-H Q3) 2255.59 +Order Priority Checking Query (TPC-H Q4) 2827.21 +Local Supplier Volume (TPC-H Q5) 2018.18 +Forecasting Revenue Change (TPC-H Q6) 1062.37 +Forecasting Revenue Change (TPC-H Q7) 2075.73 +National Market Share (TPC-H Q8) 1336.07 +Product Type Profit Measure (TPC-H Q9) 2808.84 +Forecasting Revenue Change (TPC-H Q10) 2804.71 +Important Stock Identification (TPC-H Q11) 583.68 +Shipping Modes and Order Priority (TPC-H Q12) 2137.04 +Customer Distribution (TPC-H Q13) 5569.93 +Forecasting Revenue Change (TPC-H Q14) 1130.83 +Top Supplier Query (TPC-H Q15) 1321.32 +Parts/Supplier Relationship (TPC-H Q16) 1239.32 +Small-Quantity-Order Revenue (TPC-H Q17) 5228.78 +Large Volume Customer (TPC-H Q18) 17602.90 +Discounted Revenue (TPC-H Q19) 1735.06 +Potential Part Promotion (TPC-H Q20) 1018.27 +Suppliers Who Kept Orders Waiting Query (TPC-H Q21) 2464.08 +Global Sales Opportunity Query (TPC-H Q22) 451.76 + +### Loading [s] + timeGenerate timeIngesting timeSchema timeIndex timeLoad +DatabaseService-BHT-8-1-1 1.0 123.0 1.0 196.0 326.0 + +### Geometric Mean of Medians of Timer Run [s] + Geo Times [s] +DBMS +DatabaseService-BHT-8-1-1 2.11 + +### Power@Size + Power@Size [~Q/h] +DBMS +DatabaseService-BHT-8-1-1 5249.82 + +### Throughput@Size + time [s] count SF Throughput@Size [~GB/h] +DBMS SF num_experiment num_client +DatabaseService-BHT-8-1 3 1 1 71 1 3 3346.48 + +### Workflow + +#### Actual +DBMS DatabaseService-BHT-8 - Pods [[1]] + +#### Planned +DBMS DatabaseService-BHT-8 - Pods [[1]] + +### Execution - Benchmarker + CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] +DatabaseService-BHT-8-1 16.0 0 0.25 0.27 + +### Tests +TEST passed: Geo Times [s] contains no 0 or NaN +TEST passed: Power@Size [~Q/h] contains no 0 or NaN +TEST passed: Throughput@Size [~GB/h] contains no 0 or NaN +TEST passed: No SQL errors +TEST passed: No SQL warnings +TEST passed: Execution Benchmarker contains no 0 or NaN in CPU [CPUs] +TEST passed: Workflow as planned +``` diff --git a/docs/Example-CockroachDB.md b/docs/Example-CockroachDB.md index 5d588a27b..19874adb3 100644 --- a/docs/Example-CockroachDB.md +++ b/docs/Example-CockroachDB.md @@ -96,11 +96,11 @@ Cluster Prometheus: 
Running
 Message Queue: Running
 Data directory: Running
 Result directory: Running
-+------------------------+--------------+--------------+------------+---------------+
-| 1730133803             | sut          |   loaded [s] | use case   | benchmarker   |
-+========================+==============+==============+============+===============+
-| CockroachDB-64-8-65536 | (1. Running) |           41 | ycsb       | (1. Running)  |
-+------------------------+--------------+--------------+------------+---------------+
++----------------------+--------------+--------------+----------------+-------------------------------+-------------+
+| 1734624013           | sut          |   loaded [s] | use case       | worker                        | loading     |
++======================+==============+==============+================+===============================+=============+
+| CockroachDB-1-1-1024 | (1. Running) |            2 | benchbase_tpcc | (Running) (Running) (Running) | (1 Running) |
++----------------------+--------------+--------------+----------------+-------------------------------+-------------+
 ```
 
-The code `1730133803` is the unique identifier of the experiment.
+The code `1734624013` is the unique identifier of the experiment.
@@ -121,11 +121,11 @@ At the end of a benchmark you will see a summary like
 ### Workload
 YCSB SF=1
     Type: ycsb
-    Duration: 1912s
-    Code: 1730301195
-    This includes no queries. YCSB runs the benchmark
+    Duration: 1056s
+    Code: 1734645173
+    YCSB tool runs the benchmark.
     This experiment compares run time and resource consumption of YCSB queries.
-    Workload is 'A'. Number of rows to insert is 1000000. Number of operations is 1000000. Batch size is ''.
+    Workload is 'A'. Number of rows to insert is 1000000. Number of operations is 10000000. Batch size is ''.
     YCSB is performed using several threads and processes. Target is based on multiples of '16384'. Factors for loading are [4]. Factors for benchmarking are [4].
     System metrics are monitored by a cluster-wide installation.
     Benchmark is limited to DBMS ['CockroachDB'].
@@ -133,7 +133,6 @@ YCSB SF=1
     Loading is fixed to cl-worker19.
     Benchmarking is fixed to cl-worker19.
     SUT is fixed to cl-worker11.
-    Database is persisted to disk of type shared and size 30Gi.
     Loading is tested with [64] threads, split into [8] pods.
     Benchmarking is tested with [64] threads, split into [1] pods.
     Benchmarking is run as [1] times the number of benchmarking pods.
@@ -141,60 +140,48 @@ YCSB SF=1 ### Connections CockroachDB-64-8-65536-1 uses docker image cockroachdb/cockroach:v24.2.4 - RAM:541008605184 + RAM:541008576512 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254908644 + disk:249215592 requests_cpu:4 requests_memory:16Gi worker 0 - RAM:1081965535232 - CPU: - GPU: - GPUIDs:[] + RAM:1081966526464 Cores:256 - host:5.15.0-1060-nvidia - node:cl-worker27 - disk:726928680 - datadisk:107393516 - volume_size:30G - volume_used:1.5G - cuda: + host:5.15.0-1067-nvidia + node:cl-worker28 + disk:446476124 + datadisk:116276640 + volume_size:1000G + volume_used:109G worker 1 - RAM:1081750962176 - CPU: - GPU: - GPUIDs:[] + RAM:1081965555712 + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:634371096 + datadisk:116064760 + volume_size:1000G + volume_used:109G + worker 2 + RAM:1081751019520 Cores:128 - host:5.15.0-122-generic + host:5.15.0-126-generic node:cl-worker29 - disk:391582960 - datadisk:107344022 - volume_size:30G - volume_used:1.6G - cuda: - worker 2 - RAM:1081966493696 - CPU: - GPU: - GPUIDs:[] - Cores:256 - host:5.15.0-1060-nvidia - node:cl-worker28 - disk:676774188 - datadisk:107343598 - volume_size:30G - volume_used:1.6G - cuda: + disk:153231576 + datadisk:116065120 + volume_size:1000G + volume_used:109G ### Loading experiment_run threads target pod_count [OVERALL].Throughput(ops/sec) [OVERALL].RunTime(ms) [INSERT].Return=OK [INSERT].99thPercentileLatency(us) -CockroachDB-64-8-65536 1 64 65536 8 1185.98666 845744.0 1000000 255295.0 +CockroachDB-64-8-65536 1 64 65536 8 16211.343884 61937.0 1000000 7579.5 ### Execution experiment_run threads target pod_count [OVERALL].Throughput(ops/sec) [OVERALL].RunTime(ms) [READ].Return=OK [READ].99thPercentileLatency(us) [UPDATE].Return=OK [UPDATE].99thPercentileLatency(us) -CockroachDB-64-8-65536-1 1 64 65536 1 1337.57 747625.0 499547 71359.0 500453 1549311.0 +CockroachDB-64-8-65536-1 1 64 65536 1 14106.68 708884.0 5000094 5851.0 4999906 130879.0 ### Workflow @@ -206,19 +193,19 @@ DBMS CockroachDB-64-8-65536 - Pods [[1]] ### Ingestion - SUT CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] -CockroachDB-64-8-65536-1 2363.41 2.54 4.95 9.51 +CockroachDB-64-8-65536-1 888.04 0.03 3.05 5.76 ### Ingestion - Loader CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] -CockroachDB-64-8-65536-1 227.61 0.15 4.56 4.58 +CockroachDB-64-8-65536-1 103.7 0 4.34 4.37 ### Execution - SUT CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] -CockroachDB-64-8-65536-1 3054.28 2.11 7.28 12.92 +CockroachDB-64-8-65536-1 20657.12 20.74 12.51 26.94 ### Execution - Benchmarker CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] -CockroachDB-64-8-65536-1 138.39 0.21 0.58 0.58 +CockroachDB-64-8-65536-1 1024.62 1.56 0.6 0.61 ### Tests TEST passed: [OVERALL].Throughput(ops/sec) contains no 0 or NaN @@ -243,146 +230,6 @@ This forwards ports, so you have * a Jupyter notebook server at http://localhost:8888 -## Perform YCSB Benchmark - Execution only - -For performing the experiment we can run the [ycsb file](https://github.com/Beuth-Erdelt/Benchmark-Experiment-Host-Manager/blob/master/ycsb.py). 
- -Example: -```bash -nohup python ycsb.py -ms 1 -tr \ - -sf 1 \ - -sfo 10 \ - -nw 3 \ - --workload a \ - -dbms CockroachDB \ - -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK \ - -tb 16384 \ - -nlp 8 \ - -nlt 64 \ - -nlf 4 \ - -nbp 1 \ - -nbt 64 \ - -nbf 4 \ - -ne 1 \ - -nc 1 \ - -m -mc \ - run $LOG_DIR/doc_ycsb_cockroachdb_2.log & -``` - - -```bash -## Show Summary - -### Workload -YCSB SF=1 - Type: ycsb - Duration: 1209s - Code: 1730404688 - This includes no queries. YCSB runs the benchmark - This experiment compares run time and resource consumption of YCSB queries. - Workload is 'A'. Number of rows to insert is 1000000. Number of operations is 10000000. Batch size is ''. - YCSB is performed using several threads and processes. Target is based on multiples of '16384'. Factors for loading are [4]. Factors for benchmarking are [4]. - System metrics are monitored by a cluster-wide installation. - Benchmark is limited to DBMS ['CockroachDB']. - Import is handled by 8 processes (pods). - Loading is fixed to cl-worker19. - Benchmarking is fixed to cl-worker19. - SUT is fixed to cl-worker11. - Loading is tested with [64] threads, split into [8] pods. - Benchmarking is tested with [64] threads, split into [1] pods. - Benchmarking is run as [1] times the number of benchmarking pods. - Experiment is run once. - -### Connections -CockroachDB-64-8-65536-1 uses docker image cockroachdb/cockroach:v24.2.4 - RAM:541008605184 - Cores:64 - host:5.15.0-116-generic - node:cl-worker11 - disk:254913416 - requests_cpu:4 - requests_memory:16Gi - worker 0 - RAM:1081966493696 - CPU: - GPU: - GPUIDs:[] - Cores:256 - host:5.15.0-1060-nvidia - node:cl-worker28 - disk:684869688 - datadisk:108191232 - volume_size:1000G - volume_used:101G - cuda: - worker 1 - RAM:1081965535232 - CPU: - GPU: - GPUIDs:[] - Cores:256 - host:5.15.0-1060-nvidia - node:cl-worker27 - disk:729321392 - datadisk:107983636 - volume_size:1000G - volume_used:101G - cuda: - worker 2 - RAM:1081750962176 - CPU: - GPU: - GPUIDs:[] - Cores:128 - host:5.15.0-122-generic - node:cl-worker29 - disk:406572384 - datadisk:107980308 - volume_size:1000G - volume_used:101G - cuda: - -### Loading - experiment_run threads target pod_count [OVERALL].Throughput(ops/sec) [OVERALL].RunTime(ms) [INSERT].Return=OK [INSERT].99thPercentileLatency(us) -CockroachDB-64-8-65536 1 64 65536 8 12047.323421 83583.0 1000000 17711.0 - -### Execution - experiment_run threads target pod_count [OVERALL].Throughput(ops/sec) [OVERALL].RunTime(ms) [READ].Return=OK [READ].99thPercentileLatency(us) [UPDATE].Return=OK [UPDATE].99thPercentileLatency(us) -CockroachDB-64-8-65536-1 1 64 65536 1 12148.6 823140.0 4996430 7303.0 5003570 163711.0 - -### Workflow - -#### Actual -DBMS CockroachDB-64-8-65536 - Pods [[1]] - -#### Planned -DBMS CockroachDB-64-8-65536 - Pods [[1]] - -### Ingestion - SUT - CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] -CockroachDB-64-8-65536-1 1495.11 8.35 4.2 7.63 - -### Ingestion - Loader - CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] -CockroachDB-64-8-65536-1 91.09 0 4.29 4.31 - -### Execution - SUT - CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] -CockroachDB-64-8-65536-1 21507.31 17.92 10.1 22.14 - -### Execution - Benchmarker - CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] -CockroachDB-64-8-65536-1 1184.95 1.48 0.61 0.61 - -### Tests -TEST passed: [OVERALL].Throughput(ops/sec) contains no 0 or NaN -TEST passed: [OVERALL].Throughput(ops/sec) contains no 0 or NaN -TEST passed: Ingestion SUT contains no 0 or NaN in 
CPU [CPUs] -TEST passed: Ingestion Loader contains no 0 or NaN in CPU [CPUs] -TEST passed: Execution SUT contains no 0 or NaN in CPU [CPUs] -TEST passed: Execution Benchmarker contains no 0 or NaN in CPU [CPUs] -TEST passed: Workflow as planned -``` ## Monitoring @@ -395,7 +242,7 @@ In this example, this means that used memory, CPU time, etc. are summed across a ## Use Persistent Storage -To be described: Persistent storage is per experiment here +To be described: Persistent storage is per experiment here, because K8s statefulsets derive their pvc names directly from pod names. ## YCSB Example Explained @@ -440,15 +287,23 @@ If data should be loaded, bexhoma at first creates a schema according to: https: + + + + + + ## Benchbase's TPC-C TPC-C is performed at 16 warehouses. The 16 threads of the client are split into a cascading sequence of 1 and 2 pods. +CockroachDB has 3 workers. ```bash nohup python benchbase.py -ms 1 -tr \ -sf 16 \ -sd 5 \ + -nw 3 \ -dbms CockroachDB \ -nbp 1,2 \ -nbt 16 \ @@ -466,9 +321,9 @@ nohup python benchbase.py -ms 1 -tr \ ### Workload Benchbase Workload SF=16 (warehouses for TPC-C) Type: benchbase - Duration: 1158s - Code: 1730373213 - This includes no queries. Benchbase runs the benchmark + Duration: 1166s + Code: 1734646253 + Benchbase runs the benchmark. This experiment compares run time and resource consumption of Benchbase queries in different DBMS. Benchbase data is generated and loaded using several threads. Benchmark is 'tpcc'. Scaling factor (e.g., number of warehouses) is 16. Benchmarking runs for 5 minutes. Target is based on multiples of '1024'. Factors for benchmarking are [16]. @@ -484,41 +339,95 @@ Benchbase Workload SF=16 (warehouses for TPC-C) ### Connections CockroachDB-1-1-1024-1 uses docker image cockroachdb/cockroach:v24.2.4 - RAM:541008605184 + RAM:541008576512 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254912048 + disk:249215596 requests_cpu:4 requests_memory:16Gi + worker 0 + RAM:1081966526464 + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker28 + disk:461657896 + datadisk:116314488 + volume_size:1000G + volume_used:109G + worker 1 + RAM:1081965555712 + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:635102812 + datadisk:116104180 + volume_size:1000G + volume_used:109G + worker 2 + RAM:540587499520 + Cores:128 + host:5.15.0-126-generic + node:cl-worker22 + disk:123840188 + datadisk:116091372 + volume_size:1000G + volume_used:109G CockroachDB-1-1-1024-2 uses docker image cockroachdb/cockroach:v24.2.4 - RAM:541008605184 + RAM:541008576512 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254912048 + disk:249215600 requests_cpu:4 requests_memory:16Gi + worker 0 + RAM:1081966526464 + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker28 + disk:461867536 + datadisk:116522308 + volume_size:1000G + volume_used:109G + worker 1 + RAM:1081965555712 + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:635488320 + datadisk:116308436 + volume_size:1000G + volume_used:109G + worker 2 + RAM:540587499520 + Cores:128 + host:5.15.0-126-generic + node:cl-worker22 + disk:124062476 + datadisk:116312956 + volume_size:1000G + volume_used:109G ### Execution experiment_run terminals target pod_count time Throughput (requests/second) Latency Distribution.95th Percentile Latency (microseconds) Latency Distribution.Average Latency (microseconds) -CockroachDB-1-1-1024-1 1 16 16384 1 300.0 697.93 55073.0 22911.0 -CockroachDB-1-1-1024-2 1 16 16384 2 300.0 637.91 
62856.0 25067.5 +CockroachDB-1-1-1024-1 1 16 16384 1 300.0 312.89 95381.0 51118.0 +CockroachDB-1-1-1024-2 1 16 16384 2 300.0 241.61 142861.0 66206.0 Warehouses: 16 ### Workflow #### Actual -DBMS CockroachDB-1-1-1024 - Pods [[2, 1]] +DBMS CockroachDB-1-1-1024 - Pods [[1, 2]] #### Planned DBMS CockroachDB-1-1-1024 - Pods [[1, 2]] ### Loading time_load terminals pods Imported warehouses [1/h] -CockroachDB-1-1-1024-1 236.0 1.0 1.0 244.067797 -CockroachDB-1-1-1024-2 236.0 1.0 2.0 244.067797 +CockroachDB-1-1-1024-1 267.0 1.0 1.0 215.730337 +CockroachDB-1-1-1024-2 267.0 1.0 2.0 215.730337 ### Tests TEST passed: Throughput (requests/second) contains no 0 or NaN @@ -534,6 +443,7 @@ The 64 threads of the client are split into a cascading sequence of 1,2,4 and 8 nohup python benchbase.py -ms 1 -tr \ -sf 128 \ -sd 60 \ + -nw 3 \ -dbms CockroachDB \ -nbp 1,2,4,8 \ -nbt 64 \ @@ -551,9 +461,9 @@ nohup python benchbase.py -ms 1 -tr \ ### Workload Benchbase Workload SF=128 (warehouses for TPC-C) Type: benchbase - Duration: 15418s - Code: 1730374413 - This includes no queries. Benchbase runs the benchmark + Duration: 15957s + Code: 1734647454 + Benchbase runs the benchmark. This experiment compares run time and resource consumption of Benchbase queries in different DBMS. Benchbase data is generated and loaded using several threads. Benchmark is 'tpcc'. Scaling factor (e.g., number of warehouses) is 128. Benchmarking runs for 60 minutes. Target is based on multiples of '1024'. Factors for benchmarking are [16]. @@ -569,61 +479,169 @@ Benchbase Workload SF=128 (warehouses for TPC-C) ### Connections CockroachDB-1-1-1024-1 uses docker image cockroachdb/cockroach:v24.2.4 - RAM:541008605184 + RAM:541008576512 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254912048 + disk:249215616 requests_cpu:4 requests_memory:16Gi + worker 0 + RAM:1081966526464 + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker28 + disk:469202612 + datadisk:123845016 + volume_size:1000G + volume_used:109G + worker 1 + RAM:1081751019520 + Cores:128 + host:5.15.0-126-generic + node:cl-worker29 + disk:160789996 + datadisk:123623440 + volume_size:1000G + volume_used:109G + worker 2 + RAM:1081965555712 + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:644345576 + datadisk:123626156 + volume_size:1000G + volume_used:109G CockroachDB-1-1-1024-2 uses docker image cockroachdb/cockroach:v24.2.4 - RAM:541008605184 + RAM:541008576512 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254912216 + disk:249215820 requests_cpu:4 requests_memory:16Gi + worker 0 + RAM:1081966526464 + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker28 + disk:475115332 + datadisk:129756564 + volume_size:1000G + volume_used:109G + worker 1 + RAM:1081751019520 + Cores:128 + host:5.15.0-126-generic + node:cl-worker29 + disk:166744188 + datadisk:129577516 + volume_size:1000G + volume_used:109G + worker 2 + RAM:1081965555712 + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:654361308 + datadisk:129571596 + volume_size:1000G + volume_used:109G CockroachDB-1-1-1024-3 uses docker image cockroachdb/cockroach:v24.2.4 - RAM:541008605184 + RAM:541008576512 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254912388 + disk:249216060 requests_cpu:4 requests_memory:16Gi + worker 0 + RAM:1081966526464 + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker28 + disk:478912840 + datadisk:133546860 + volume_size:1000G + volume_used:109G + worker 1 + 
RAM:1081751019520 + Cores:128 + host:5.15.0-126-generic + node:cl-worker29 + disk:170393272 + datadisk:133226492 + volume_size:1000G + volume_used:109G + worker 2 + RAM:1081965555712 + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:661848004 + datadisk:133214428 + volume_size:1000G + volume_used:109G CockroachDB-1-1-1024-4 uses docker image cockroachdb/cockroach:v24.2.4 - RAM:541008605184 + RAM:541008576512 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254912556 + disk:249216408 requests_cpu:4 requests_memory:16Gi + worker 0 + RAM:1081966526464 + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker28 + disk:481732908 + datadisk:136364000 + volume_size:1000G + volume_used:109G + worker 1 + RAM:1081751019520 + Cores:128 + host:5.15.0-126-generic + node:cl-worker29 + disk:173351696 + datadisk:136184808 + volume_size:1000G + volume_used:109G + worker 2 + RAM:1081965555712 + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:667527460 + datadisk:136153100 + volume_size:1000G + volume_used:109G ### Execution experiment_run terminals target pod_count time Throughput (requests/second) Latency Distribution.95th Percentile Latency (microseconds) Latency Distribution.Average Latency (microseconds) -CockroachDB-1-1-1024-1 1 64 16384 1 3600.0 696.70 233544.0 91853.00 -CockroachDB-1-1-1024-2 1 64 16384 2 3600.0 653.01 247449.0 97998.00 -CockroachDB-1-1-1024-3 1 64 16384 4 3600.0 599.12 258276.0 106813.50 -CockroachDB-1-1-1024-4 1 64 16384 8 3600.0 538.01 262975.0 118944.88 +CockroachDB-1-1-1024-1 1 64 16384 1 3600.0 1097.64 144074.0 58301.00 +CockroachDB-1-1-1024-2 1 64 16384 2 3600.0 1026.91 161894.0 62323.00 +CockroachDB-1-1-1024-3 1 64 16384 4 3600.0 908.92 181035.0 70443.25 +CockroachDB-1-1-1024-4 1 64 16384 8 3600.0 675.46 224333.0 94757.50 Warehouses: 128 ### Workflow #### Actual -DBMS CockroachDB-1-1-1024 - Pods [[8, 4, 2, 1]] +DBMS CockroachDB-1-1-1024 - Pods [[1, 2, 8, 4]] #### Planned DBMS CockroachDB-1-1-1024 - Pods [[1, 2, 4, 8]] ### Loading time_load terminals pods Imported warehouses [1/h] -CockroachDB-1-1-1024-1 531.0 1.0 1.0 867.79661 -CockroachDB-1-1-1024-2 531.0 1.0 2.0 867.79661 -CockroachDB-1-1-1024-3 531.0 1.0 4.0 867.79661 -CockroachDB-1-1-1024-4 531.0 1.0 8.0 867.79661 +CockroachDB-1-1-1024-1 1036.0 1.0 1.0 444.787645 +CockroachDB-1-1-1024-2 1036.0 1.0 2.0 444.787645 +CockroachDB-1-1-1024-3 1036.0 1.0 4.0 444.787645 +CockroachDB-1-1-1024-4 1036.0 1.0 8.0 444.787645 ### Tests TEST passed: Throughput (requests/second) contains no 0 or NaN diff --git a/docs/Example-TPC-DS.md b/docs/Example-TPC-DS.md index 946ac8d65..9fca04f33 100644 --- a/docs/Example-TPC-DS.md +++ b/docs/Example-TPC-DS.md @@ -809,7 +809,7 @@ TEST passed: Workflow as planned ``` This gives a survey about CPU (in CPU seconds) and RAM usage (in Gb) during loading and execution of the benchmark. -PostgreSQL is fast, so we cannot see a lot (metrics are fetched every 30 seconds). +MonetDB is fast, so we cannot see a lot (metrics are fetched every 30 seconds). ## Perform Benchmark - Throughput Test @@ -1081,11 +1081,11 @@ All other instances just use the database without generating and loading data. 
| bexhoma-storage-monetdb-tpcds-100       | monetdb         | tpcds-100    | True         |              4019 | MonetDB    | shared               | 300Gi     | Bound    | 300G   | 156G   |
 +-----------------------------------------+-----------------+--------------+--------------+-------------------+------------+----------------------+-----------+----------+--------+--------+
 
-+------------------+--------------+--------------+---------------+
-| 1707740320       | sut          |   loaded [s] | benchmarker   |
-+==================+==============+==============+===============+
-| PostgreSQL-BHT-8 | (1. Running) |       185.41 | (1. Running)  |
-+------------------+--------------+--------------+---------------+
++---------------+--------------+--------------+---------------+
+| 1707740320    | sut          |   loaded [s] | benchmarker   |
++===============+==============+==============+===============+
+| MonetDB-BHT-8 | (1. Running) |       185.41 | (1. Running)  |
++---------------+--------------+--------------+---------------+
 ```
 
 The result looks something like
diff --git a/docs/Example-TPC-H.md b/docs/Example-TPC-H.md
index 98002a16e..5ac65ce99 100644
--- a/docs/Example-TPC-H.md
+++ b/docs/Example-TPC-H.md
@@ -45,7 +45,6 @@ nohup python tpch.py -ms 4 -dt -tr \
 This
 * starts a clean instance of PostgreSQL, MonetDB, MySQL and MariaDB (at the same time, `-ms`)
   * data directory inside a Docker container
-  * with a maximum of 1 DBMS per time (`-ms`)
 * creates TPC-H schema in each database
 * starts 8 loader pods per DBMS (`-nlp`)
   * with a data generator (init) container each
diff --git a/docs/Example-YugaByteDB.md b/docs/Example-YugaByteDB.md
index 80c3c0387..e95587ed9 100644
--- a/docs/Example-YugaByteDB.md
+++ b/docs/Example-YugaByteDB.md
@@ -64,18 +64,24 @@ helm status bexhoma
 ```
 
-Remove the installation:
+After the experiment: Remove the installation
 ```bash
 helm delete bexhoma
 kubectl delete pvc -l app=yb-tserver
 kubectl delete pvc -l app=yb-master
 ```
 
-Connecting
+Optionally: Connect to the installation
 * to DBMS: `kubectl port-forward service/yb-tserver-service 5433:5433`
 * to GUI: `kubectl port-forward service/yb-master-ui 8080:7000`
 
+Optionally: Use the [YugabyteDB connection manager](https://docs.yugabyte.com/preview/explore/going-beyond-sql/connection-mgr-ysql/) by adding the following to the Helm installation command:
+```
+gflags.tserver.enable_ysql_conn_mgr=true,\
+gflags.tserver.allowed_preview_flags_csv=enable_ysql_conn_mgr,\
+```
+
 ## Perform YCSB Benchmark - Ingestion of Data Included
 
 You will have to change the node selectors there (to names of nodes, that exist in your cluster - or to leave out the corresponding parameters):
@@ -168,9 +174,9 @@ At the end of a benchmark you will see a summary like
 ### Workload
 YCSB SF=1
     Type: ycsb
-    Duration: 773s
-    Code: 1730133803
-    This includes no queries. YCSB runs the benchmark
+    Duration: 562s
+    Code: 1734625544
+    YCSB tool runs the benchmark.
     This experiment compares run time and resource consumption of YCSB queries.
     Workload is 'A'. Number of rows to insert is 1000000. Number of operations is 10000000. Batch size is ''.
     YCSB is performed using several threads and processes. Target is based on multiples of '16384'. Factors for loading are [4]. Factors for benchmarking are [4].
@@ -187,23 +193,44 @@ YCSB SF=1 ### Connections YugabyteDB-64-8-65536-1 uses docker image postgres:15.0 - RAM:541008605184 + RAM:541008576512 CPU:AMD Opteron(tm) Processor 6378 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254319416 + disk:249253840 datadisk:39428 requests_cpu:4 requests_memory:16Gi + worker 0 + RAM:1081966526464 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker28 + disk:439206828 + worker 1 + RAM:540587499520 + CPU:AMD EPYC 7502 32-Core Processor + Cores:128 + host:5.15.0-126-generic + node:cl-worker22 + disk:122936080 + worker 2 + RAM:1081965555712 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:584264864 ### Loading experiment_run threads target pod_count [OVERALL].Throughput(ops/sec) [OVERALL].RunTime(ms) [INSERT].Return=OK [INSERT].99thPercentileLatency(us) -YugabyteDB-64-8-65536 1 64 65536 8 16556.163255 60725.0 1000000 61163.0 +YugabyteDB-64-8-65536 1 64 65536 8 28456.559524 35509.0 1000000 15762.0 ### Execution experiment_run threads target pod_count [OVERALL].Throughput(ops/sec) [OVERALL].RunTime(ms) [READ].Return=OK [READ].99thPercentileLatency(us) [UPDATE].Return=OK [UPDATE].99thPercentileLatency(us) -YugabyteDB-64-8-65536-1 1 64 65536 1 20041.33 498969.0 5001600 63903.0 4998400 65599.0 +YugabyteDB-64-8-65536-1 1 64 65536 1 31861.75 313856.0 4998066 37663.0 5001934 43039.0 ### Workflow @@ -215,19 +242,19 @@ DBMS YugabyteDB-64-8-65536 - Pods [[1]] ### Ingestion - SUT CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] -YugabyteDB-64-8-65536-1 2423.24 13.34 12.39 16.56 +YugabyteDB-64-8-65536-1 925.63 4.64 1.75 5.32 ### Ingestion - Loader CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] -YugabyteDB-64-8-65536-1 52.99 0 2.83 2.87 +YugabyteDB-64-8-65536-1 0.09 0 0.01 0.01 ### Execution - SUT CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] -YugabyteDB-64-8-65536-1 19524.15 26.08 14.07 24.03 +YugabyteDB-64-8-65536-1 13499.7 15.99 4.92 16.26 ### Execution - Benchmarker CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] -YugabyteDB-64-8-65536-1 934.46 2.07 0.61 0.61 +YugabyteDB-64-8-65536-1 932.99 3.2 0.61 0.61 ### Tests TEST passed: [OVERALL].Throughput(ops/sec) contains no 0 or NaN @@ -261,7 +288,6 @@ Example: nohup python ycsb.py -ms 1 -tr \ -sf 1 \ -sfo 10 \ - -sl \ --workload a \ -dbms YugabyteDB \ -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK \ @@ -275,6 +301,7 @@ nohup python ycsb.py -ms 1 -tr \ -ne 1 \ -nc 1 \ -m -mc \ + -sl \ run $LOG_DIR/doc_ycsb_yugabytedb_2.log & ``` @@ -286,9 +313,9 @@ This skips loading (`-sl`), as data is already present in the database. ### Workload YCSB SF=1 Type: ycsb - Duration: 690s - Code: 1730223222 - This includes no queries. YCSB runs the benchmark + Duration: 511s + Code: 1734626144 + YCSB tool runs the benchmark. This experiment compares run time and resource consumption of YCSB queries. Workload is 'A'. Number of rows to insert is 1000000. Number of operations is 10000000. Batch size is ''. YCSB is performed using several threads and processes. Target is based on multiples of '16384'. Factors for loading are [4]. Factors for benchmarking are [4]. @@ -298,6 +325,7 @@ YCSB SF=1 Loading is fixed to cl-worker19. Benchmarking is fixed to cl-worker19. SUT is fixed to cl-worker11. + Loading is skipped. Loading is tested with [64] threads, split into [8] pods. Benchmarking is tested with [64] threads, split into [1] pods. 
Benchmarking is run as [1] times the number of benchmarking pods. @@ -305,19 +333,40 @@ YCSB SF=1 ### Connections YugabyteDB-64-8-65536-1 uses docker image postgres:15.0 - RAM:541008605184 + RAM:541008576512 CPU:AMD Opteron(tm) Processor 6378 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254319248 + disk:249253676 datadisk:39268 requests_cpu:4 requests_memory:16Gi + worker 0 + RAM:1081966526464 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker28 + disk:441407636 + worker 1 + RAM:540587499520 + CPU:AMD EPYC 7502 32-Core Processor + Cores:128 + host:5.15.0-126-generic + node:cl-worker22 + disk:125219072 + worker 2 + RAM:1081965555712 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:586475888 ### Execution experiment_run threads target pod_count [OVERALL].Throughput(ops/sec) [OVERALL].RunTime(ms) [READ].Return=OK [READ].99thPercentileLatency(us) [UPDATE].Return=OK [UPDATE].99thPercentileLatency(us) -YugabyteDB-64-8-65536-1 1 64 65536 1 19547.36 511578.0 4998778 64703.0 5001222 66239.0 +YugabyteDB-64-8-65536-1 1 64 65536 1 31287.55 319616.0 4999554 39551.0 5000446 43999.0 ### Workflow @@ -329,11 +378,11 @@ DBMS YugabyteDB-64-8-65536 - Pods [[1]] ### Execution - SUT CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] -YugabyteDB-64-8-65536-1 19802.0 26.15 14.21 24.03 +YugabyteDB-64-8-65536-1 13772.52 16.0 7.23 23.99 ### Execution - Benchmarker CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] -YugabyteDB-64-8-65536-1 1039.41 2.13 0.61 0.61 +YugabyteDB-64-8-65536-1 982.63 3.4 0.61 0.61 ### Tests TEST passed: [OVERALL].Throughput(ops/sec) contains no 0 or NaN @@ -359,7 +408,8 @@ For further explanation see the monitoring section of this documentation. ### Bexhoma Status Volume Persistent Storage is not managed by bexhoma, but by YugabyteDB. -We can add the request for a PVC to the experiment setup: +We can add the request for a PVC to the experiment setup. +Make sure to reset the database before this test as it should not contain data from previous test runs. ```bash nohup python ycsb.py -ms 1 -tr \ -sf 1 \ @@ -400,6 +450,106 @@ If YugabyteDB is restarted or data is delete somehow, this PVC information will This approach helps bexhoma to persist status information, but it does not persist data inside YugabyteDB. +```bash +## Show Summary + +### Workload +YCSB SF=1 + Type: ycsb + Duration: 591s + Code: 1734626805 + YCSB tool runs the benchmark. + This experiment compares run time and resource consumption of YCSB queries. + Workload is 'A'. Number of rows to insert is 1000000. Number of operations is 10000000. Batch size is ''. + YCSB is performed using several threads and processes. Target is based on multiples of '16384'. Factors for loading are [4]. Factors for benchmarking are [4]. + System metrics are monitored by a cluster-wide installation. + Benchmark is limited to DBMS ['YugabyteDB']. + Import is handled by 8 processes (pods). + Loading is fixed to cl-worker19. + Benchmarking is fixed to cl-worker19. + SUT is fixed to cl-worker11. + Database is persisted to disk of type shared and size 1Gi. + Loading is tested with [64] threads, split into [8] pods. + Benchmarking is tested with [64] threads, split into [1] pods. + Benchmarking is run as [1] times the number of benchmarking pods. + Experiment is run once. 
+
+### Connections
+YugabyteDB-64-8-65536-1 uses docker image postgres:15.0
+    RAM:541008576512
+    CPU:AMD Opteron(tm) Processor 6378
+    Cores:64
+    host:5.15.0-126-generic
+    node:cl-worker11
+    disk:249214420
+    datadisk:39106
+    volume_size:1.0G
+    volume_used:36M
+    requests_cpu:4
+    requests_memory:16Gi
+    worker 0
+        RAM:1081966526464
+        CPU:AMD EPYC 7742 64-Core Processor
+        Cores:256
+        host:5.15.0-1067-nvidia
+        node:cl-worker28
+        disk:439198928
+    worker 1
+        RAM:540587499520
+        CPU:AMD EPYC 7502 32-Core Processor
+        Cores:128
+        host:5.15.0-126-generic
+        node:cl-worker22
+        disk:122937852
+    worker 2
+        RAM:1081965555712
+        CPU:AMD EPYC 7742 64-Core Processor
+        Cores:256
+        host:5.15.0-1067-nvidia
+        node:cl-worker27
+        disk:584265648
+
+### Loading
+                       experiment_run  threads  target  pod_count  [OVERALL].Throughput(ops/sec)  [OVERALL].RunTime(ms)  [INSERT].Return=OK  [INSERT].99thPercentileLatency(us)
+YugabyteDB-64-8-65536               1       64   65536          8                   27920.198198                36082.0             1000000                             15099.0
+
+### Execution
+                         experiment_run  threads  target  pod_count  [OVERALL].Throughput(ops/sec)  [OVERALL].RunTime(ms)  [READ].Return=OK  [READ].99thPercentileLatency(us)  [UPDATE].Return=OK  [UPDATE].99thPercentileLatency(us)
+YugabyteDB-64-8-65536-1               1       64   65536          1                       30852.87               324119.0           4999076                           26703.0             5000924                             44287.0
+
+### Workflow
+
+#### Actual
+DBMS YugabyteDB-64-8-65536 - Pods [[1]]
+
+#### Planned
+DBMS YugabyteDB-64-8-65536 - Pods [[1]]
+
+### Ingestion - SUT
+                         CPU [CPUs]  Max CPU  Max RAM [Gb]  Max RAM Cached [Gb]
+YugabyteDB-64-8-65536-1    14677.38     4.86          4.23                13.99
+
+### Ingestion - Loader
+                         CPU [CPUs]  Max CPU  Max RAM [Gb]  Max RAM Cached [Gb]
+YugabyteDB-64-8-65536-1        0.08        0          0.01                 0.01
+
+### Execution - SUT
+                         CPU [CPUs]  Max CPU  Max RAM [Gb]  Max RAM Cached [Gb]
+YugabyteDB-64-8-65536-1    14586.01    17.96          4.88                16.69
+
+### Execution - Benchmarker
+                         CPU [CPUs]  Max CPU  Max RAM [Gb]  Max RAM Cached [Gb]
+YugabyteDB-64-8-65536-1      771.47     2.69          0.61                 0.61
+
+### Tests
+TEST passed: [OVERALL].Throughput(ops/sec) contains no 0 or NaN
+TEST passed: [OVERALL].Throughput(ops/sec) contains no 0 or NaN
+TEST passed: Ingestion SUT contains no 0 or NaN in CPU [CPUs]
+TEST passed: Ingestion Loader contains no 0 or NaN in CPU [CPUs]
+TEST passed: Execution SUT contains no 0 or NaN in CPU [CPUs]
+TEST passed: Execution Benchmarker contains no 0 or NaN in CPU [CPUs]
+TEST passed: Workflow as planned
+```
 
 ### Persist YugabyteDB
@@ -469,15 +619,24 @@ In `ycsb.py` there is a section about YugabyteDB.
 
 Watch for
 * `config.sut_service_name`: Fixed name for the service of the SUT (="yb-tserver-service")
-* `config.sut_container_name`: Fixed name for the container of the SUT (="yb-tserver")
-* `config.create_monitoring()`: Method to create names for monitored components (for SUT = "yb-tserver-")
-* `config.get_worker_endpoints()`: ?
-* `config.set_metric_of_config()`: Method to create promql queries from templates (pod like "yb-tserver", no container name)
+* `config.sut_container_name`: Fixed name for the container of the SUT (="")
+* `config.get_worker_pods()`: Method to find the pods of worker nodes (['yb-tserver-0', 'yb-tserver-1', 'yb-tserver-2']). This allows getting host info like CPU, RAM, node name, ...
+* `config.create_monitoring()`: Method to create names for monitored components (for SUT = "yb-tserver-"). This prevents the SUT dummy from contributing to the monitoring.
+* `config.get_worker_endpoints()`: This is necessary when we have sidecar containers attached to workers of a distributed DBMS. Monitoring needs to find these containers.
+* `config.set_metric_of_config()`: Method to create promql queries from templates (pod like "yb-tserver", no container name, for our SUT) + + + + + + ## Benchbase's TPC-C +TPC-C is performed at 16 warehouses. The 16 threads of the client are split into a cascading sequence of 1 and 2 pods. + ```bash nohup python benchbase.py -ms 1 -tr \ -sf 16 \ @@ -499,9 +658,9 @@ yields ### Workload Benchbase Workload SF=16 (warehouses for TPC-C) Type: benchbase - Duration: 1026s - Code: 1730223936 - This includes no queries. Benchbase runs the benchmark + Duration: 1067s + Code: 1734627587 + Benchbase runs the benchmark. This experiment compares run time and resource consumption of Benchbase queries in different DBMS. Benchbase data is generated and loaded using several threads. Benchmark is 'tpcc'. Scaling factor (e.g., number of warehouses) is 16. Benchmarking runs for 5 minutes. Target is based on multiples of '1024'. Factors for benchmarking are [16]. @@ -517,45 +676,87 @@ Benchbase Workload SF=16 (warehouses for TPC-C) ### Connections YugabyteDB-1-1-1024-1 uses docker image postgres:15.0 - RAM:541008605184 + RAM:541008576512 CPU:AMD Opteron(tm) Processor 6378 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254319408 + disk:249254028 datadisk:39428 requests_cpu:4 requests_memory:16Gi + worker 0 + RAM:1081966526464 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker28 + disk:440540068 + worker 1 + RAM:1081965555712 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:585609424 + worker 2 + RAM:540587499520 + CPU:AMD EPYC 7502 32-Core Processor + Cores:128 + host:5.15.0-126-generic + node:cl-worker22 + disk:124275600 YugabyteDB-1-1-1024-2 uses docker image postgres:15.0 - RAM:541008605184 + RAM:541008576512 CPU:AMD Opteron(tm) Processor 6378 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254319580 + disk:249254032 datadisk:39428 requests_cpu:4 requests_memory:16Gi + worker 0 + RAM:1081966526464 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker28 + disk:441361868 + worker 1 + RAM:1081965555712 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:586458512 + worker 2 + RAM:540587499520 + CPU:AMD EPYC 7502 32-Core Processor + Cores:128 + host:5.15.0-126-generic + node:cl-worker22 + disk:125118168 ### Execution experiment_run terminals target pod_count time Throughput (requests/second) Latency Distribution.95th Percentile Latency (microseconds) Latency Distribution.Average Latency (microseconds) -YugabyteDB-1-1-1024-1 1 16 16384 1 300.0 395.54 100821.0 40433.0 -YugabyteDB-1-1-1024-2 1 16 16384 2 300.0 346.81 112470.0 46113.5 +YugabyteDB-1-1-1024-1 1 16 16384 1 300.0 403.95 96762.0 39594.0 +YugabyteDB-1-1-1024-2 1 16 16384 2 300.0 362.52 111737.0 44178.0 Warehouses: 16 ### Workflow #### Actual -DBMS YugabyteDB-1-1-1024 - Pods [[1, 2]] +DBMS YugabyteDB-1-1-1024 - Pods [[2, 1]] #### Planned DBMS YugabyteDB-1-1-1024 - Pods [[1, 2]] ### Loading time_load terminals pods Imported warehouses [1/h] -YugabyteDB-1-1-1024-1 200.0 1.0 1.0 288.0 -YugabyteDB-1-1-1024-2 200.0 1.0 2.0 288.0 +YugabyteDB-1-1-1024-1 216.0 1.0 1.0 266.666667 +YugabyteDB-1-1-1024-2 216.0 1.0 2.0 266.666667 ### Tests TEST passed: Throughput (requests/second) contains no 0 or NaN @@ -578,6 +779,9 @@ BEXHOMA_PORT = 5433, ### More Complex Example +We now run Benchbase's TPC-C variant with more 
data, for a longer period of time and with a varying number of pods for execution. +Make sure to reset the database before this test as it should not contain data from previous test runs. + ```bash nohup python benchbase.py -ms 1 -tr \ @@ -594,15 +798,15 @@ nohup python benchbase.py -ms 1 -tr \ yields -``` +```bash ## Show Summary ### Workload Benchbase Workload SF=128 (warehouses for TPC-C) Type: benchbase - Duration: 16098s - Code: 1730226312 - This includes no queries. Benchbase runs the benchmark + Duration: 16283s + Code: 1734628788 + Benchbase runs the benchmark. This experiment compares run time and resource consumption of Benchbase queries in different DBMS. Benchbase data is generated and loaded using several threads. Benchmark is 'tpcc'. Scaling factor (e.g., number of warehouses) is 128. Benchmarking runs for 60 minutes. Target is based on multiples of '1024'. Factors for benchmarking are [16]. @@ -618,69 +822,153 @@ Benchbase Workload SF=128 (warehouses for TPC-C) ### Connections YugabyteDB-1-1-1024-1 uses docker image postgres:15.0 - RAM:541008605184 + RAM:541008576512 CPU:AMD Opteron(tm) Processor 6378 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254319580 + disk:249254048 datadisk:39428 requests_cpu:4 requests_memory:16Gi + worker 0 + RAM:1081966526464 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker28 + disk:458849332 + worker 1 + RAM:1081965555712 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:603879336 + worker 2 + RAM:540587499520 + CPU:AMD EPYC 7502 32-Core Processor + Cores:128 + host:5.15.0-126-generic + node:cl-worker22 + disk:143144816 YugabyteDB-1-1-1024-2 uses docker image postgres:15.0 - RAM:541008605184 + RAM:541008576512 CPU:AMD Opteron(tm) Processor 6378 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254319748 + disk:249254240 datadisk:39428 requests_cpu:4 requests_memory:16Gi + worker 0 + RAM:1081966526464 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker28 + disk:450176968 + worker 1 + RAM:1081965555712 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:595302840 + worker 2 + RAM:540587499520 + CPU:AMD EPYC 7502 32-Core Processor + Cores:128 + host:5.15.0-126-generic + node:cl-worker22 + disk:133934964 YugabyteDB-1-1-1024-3 uses docker image postgres:15.0 - RAM:541008605184 + RAM:541008576512 CPU:AMD Opteron(tm) Processor 6378 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254319920 + disk:249254432 datadisk:39428 requests_cpu:4 requests_memory:16Gi + worker 0 + RAM:1081966526464 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker28 + disk:450484892 + worker 1 + RAM:1081965555712 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:595477572 + worker 2 + RAM:540587499520 + CPU:AMD EPYC 7502 32-Core Processor + Cores:128 + host:5.15.0-126-generic + node:cl-worker22 + disk:134049872 YugabyteDB-1-1-1024-4 uses docker image postgres:15.0 - RAM:541008605184 + RAM:541008576512 CPU:AMD Opteron(tm) Processor 6378 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254320088 + disk:249254644 datadisk:39428 requests_cpu:4 requests_memory:16Gi + worker 0 + RAM:1081966526464 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + 
host:5.15.0-1067-nvidia + node:cl-worker28 + disk:456563680 + worker 1 + RAM:1081965555712 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:640944212 + worker 2 + RAM:540587499520 + CPU:AMD EPYC 7502 32-Core Processor + Cores:128 + host:5.15.0-126-generic + node:cl-worker22 + disk:134098940 ### Execution experiment_run terminals target pod_count time Throughput (requests/second) Latency Distribution.95th Percentile Latency (microseconds) Latency Distribution.Average Latency (microseconds) -YugabyteDB-1-1-1024-1 1 64 16384 1 3600.0 469.61 327056.0 136271.00 -YugabyteDB-1-1-1024-2 1 64 16384 2 3600.0 450.66 357886.0 141998.50 -YugabyteDB-1-1-1024-3 1 64 16384 4 3600.0 402.57 409184.0 159129.50 -YugabyteDB-1-1-1024-4 1 64 16384 8 3600.0 247.49 896527.0 258644.62 +YugabyteDB-1-1-1024-1 1 64 16384 1 3600.0 482.34 346967.0 132675.00 +YugabyteDB-1-1-1024-2 1 64 16384 2 3600.0 388.47 414796.0 164642.00 +YugabyteDB-1-1-1024-3 1 64 16384 4 3600.0 376.70 430011.0 169282.00 +YugabyteDB-1-1-1024-4 1 64 16384 8 3600.0 258.70 767035.0 244458.62 Warehouses: 128 ### Workflow #### Actual -DBMS YugabyteDB-1-1-1024 - Pods [[1, 4, 2, 8]] +DBMS YugabyteDB-1-1-1024 - Pods [[8, 4, 2, 1]] #### Planned DBMS YugabyteDB-1-1-1024 - Pods [[1, 2, 4, 8]] ### Loading time_load terminals pods Imported warehouses [1/h] -YugabyteDB-1-1-1024-1 1151.0 1.0 1.0 400.347524 -YugabyteDB-1-1-1024-2 1151.0 1.0 2.0 400.347524 -YugabyteDB-1-1-1024-3 1151.0 1.0 4.0 400.347524 -YugabyteDB-1-1-1024-4 1151.0 1.0 8.0 400.347524 +YugabyteDB-1-1-1024-1 1033.0 1.0 1.0 446.07938 +YugabyteDB-1-1-1024-2 1033.0 1.0 2.0 446.07938 +YugabyteDB-1-1-1024-3 1033.0 1.0 4.0 446.07938 +YugabyteDB-1-1-1024-4 1033.0 1.0 8.0 446.07938 ### Tests TEST passed: Throughput (requests/second) contains no 0 or NaN diff --git a/docs/Monitoring.md b/docs/Monitoring.md index 349234223..304a81b7e 100644 --- a/docs/Monitoring.md +++ b/docs/Monitoring.md @@ -16,7 +16,7 @@ Probably you won't have to change much. If there is a Prometheus server running in your cluster, make sure to adjust `service_monitoring`. If there is no Prometheus server running in your cluster, make sure to leave the template in `service_monitoring` as is. Bexhoma checks at the beginning of an experiment if the URL provided is reachable; -it uses cURL inside the dashboard pod to test if `query_range?query=node_memory_MemTotal_bytes&start=1&end=2&step=1` has a return status of 200. +it uses cURL inside the dashboard pod to test if `query_range?query=sum(node_memory_MemTotal_bytes)&start={start}&end={end}&step=60` has a return status of 200 (where `start` is 5 min ago and `end` is 4 min ago). If there is no preinstalled Prometheus in the cluster, bexhoma will in case of * Monitor only the system-under-test (SUT) with `-m` diff --git a/docs/conf.py b/docs/conf.py index 48078e0da..b7dd0fe26 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -6,8 +6,13 @@ copyright = '2021, Patrick K. Erdelt' author = 'Patrick K. 
Erdelt' -release = '0.6' -version = '0.6.1' +import importlib.metadata + +release = importlib.metadata.version('bexhoma') +version = release + +#release = '0.6' +#version = '0.6.1' language = 'en' # -- General configuration diff --git a/docs/index.rst b/docs/index.rst index fc6ee8000..4de86bb72 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -19,6 +19,7 @@ ./Example-TPC-DS.md ./Example-YugaByteDB.md ./Example-CockroachDB.md + ./Example-CloudDatabase.md ./Example-custom.md ./Tool.md ./Concept.md diff --git a/docs/requirements.txt b/docs/requirements.txt index fab6a2184..2f2fac78f 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,3 +1,4 @@ sphinx sphinx-rtd-theme sphinx-mdinclude +bexhoma diff --git a/experiments/benchbase/queries.config b/experiments/benchbase/queries.config index bdaedc510..a2c17d045 100644 --- a/experiments/benchbase/queries.config +++ b/experiments/benchbase/queries.config @@ -1,6 +1,6 @@ { 'name': "Benchbase Queries", - 'intro': "This includes no queries. Benchbase runs the benchmark", + 'intro': "Benchbase runs the benchmark.", 'factor': 'mean', 'connectionmanagement': { 'timeout': 600, diff --git a/experiments/tpcc/queries.config b/experiments/tpcc/queries.config index 5d721fc07..3f9e43698 100644 --- a/experiments/tpcc/queries.config +++ b/experiments/tpcc/queries.config @@ -1,6 +1,6 @@ { 'name': "The TPC-C Queries", - 'intro': "This includes no queries. HammerDB runs the benchmark", + 'intro': "HammerDB runs the benchmark.", 'factor': 'mean', 'connectionmanagement': { 'timeout': 600, diff --git a/experiments/tpcds/queries-tpcds.config b/experiments/tpcds/queries-tpcds.config index b1ea7ea38..1dc50ade5 100644 --- a/experiments/tpcds/queries-tpcds.config +++ b/experiments/tpcds/queries-tpcds.config @@ -1725,9 +1725,9 @@ ,i_class ,i_current_price order by - i_category - ,i_class - ,i_item_id + i_category is not null, i_category + ,i_class is not null, i_class + ,i_item_id is not null, i_item_id ,i_item_desc ,revenueratio limit 100""", @@ -3609,10 +3609,10 @@ ,i_item_id ,sum(case when (cast(d_date as date) < cast('{YEAR}-{MONTH}-{DAY}' as date)) then inv_quantity_on_hand - else 0 end) as inv_before + else 0. end) as inv_before ,sum(case when (cast(d_date as date) >= cast('{YEAR}-{MONTH}-{DAY}' as date)) then inv_quantity_on_hand - else 0 end) as inv_after + else 0. 
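+ -- note: the decimal literal '0.' keeps the CASE branches (and hence the SUM)
+ -- in a decimal type; a plain integer 0 can force integer arithmetic in some
+ -- DBMS and distort the inv_after/inv_before ratio computed below.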
end) as inv_after from inventory ,warehouse ,item @@ -3625,7 +3625,7 @@ and (cast('{YEAR}-{MONTH}-{DAY}' as date) + interval '30' day) group by w_warehouse_name, i_item_id) x where (case when inv_before > 0 - then inv_after / inv_before + then CAST(inv_after AS FLOAT) / inv_before else null end) between 2.0/3.0 and 3.0/2.0 order by w_warehouse_name is not null, w_warehouse_name @@ -4689,78 +4689,78 @@ }, { 'title': "TPC-DS Q33", - 'query': """ with ss as ( - select - i_manufact_id,sum(ss_ext_sales_price) total_sales - from - store_sales, - date_dim, - customer_address, - item - where - i_manufact_id in (select - i_manufact_id - from - item - where i_category in ('{CATEGORY}')) - and ss_item_sk = i_item_sk - and ss_sold_date_sk = d_date_sk - and d_year = {YEAR} - and d_moy = {MONTH} - and ss_addr_sk = ca_address_sk - and ca_gmt_offset = {GMT} - group by i_manufact_id), - cs as ( - select - i_manufact_id,sum(cs_ext_sales_price) total_sales - from - catalog_sales, - date_dim, - customer_address, - item - where - i_manufact_id in (select - i_manufact_id - from - item - where i_category in ('{CATEGORY}')) - and cs_item_sk = i_item_sk - and cs_sold_date_sk = d_date_sk - and d_year = {YEAR} - and d_moy = {MONTH} - and cs_bill_addr_sk = ca_address_sk - and ca_gmt_offset = {GMT} - group by i_manufact_id), - ws as ( - select - i_manufact_id,sum(ws_ext_sales_price) total_sales - from - web_sales, - date_dim, - customer_address, - item - where - i_manufact_id in (select - i_manufact_id - from - item - where i_category in ('{CATEGORY}')) - and ws_item_sk = i_item_sk - and ws_sold_date_sk = d_date_sk - and d_year = {YEAR} - and d_moy = {MONTH} - and ws_bill_addr_sk = ca_address_sk - and ca_gmt_offset = {GMT} - group by i_manufact_id) - select i_manufact_id ,sum(total_sales) total_sales - from (select * from ss - union all - select * from cs - union all - select * from ws) tmp1 - group by i_manufact_id - order by total_sales is not null, total_sales - limit 100""", + 'query': """with ss as ( + select + i_manufact_id,sum(ss_ext_sales_price) total_sales + from + store_sales, + date_dim, + customer_address, + item + where + i_manufact_id in (select + i_manufact_id + from + item + where i_category in ('{CATEGORY}')) + and ss_item_sk = i_item_sk + and ss_sold_date_sk = d_date_sk + and d_year = {YEAR} + and d_moy = {MONTH} + and ss_addr_sk = ca_address_sk + and ca_gmt_offset = {GMT} + group by i_manufact_id), + cs as ( + select + i_manufact_id,sum(cs_ext_sales_price) total_sales + from + catalog_sales, + date_dim, + customer_address, + item + where + i_manufact_id in (select + i_manufact_id + from + item + where i_category in ('{CATEGORY}')) + and cs_item_sk = i_item_sk + and cs_sold_date_sk = d_date_sk + and d_year = {YEAR} + and d_moy = {MONTH} + and cs_bill_addr_sk = ca_address_sk + and ca_gmt_offset = {GMT} + group by i_manufact_id), + ws as ( + select + i_manufact_id,sum(ws_ext_sales_price) total_sales + from + web_sales, + date_dim, + customer_address, + item + where + i_manufact_id in (select + i_manufact_id + from + item + where i_category in ('{CATEGORY}')) + and ws_item_sk = i_item_sk + and ws_sold_date_sk = d_date_sk + and d_year = {YEAR} + and d_moy = {MONTH} + and ws_bill_addr_sk = ca_address_sk + and ca_gmt_offset = {GMT} + group by i_manufact_id) + select i_manufact_id ,sum(total_sales) total_sales + from (select * from ss + union all + select * from cs + union all + select * from ws) tmp1 + group by i_manufact_id + order by total_sales + limit 100""", 'parameter': { 'GMT': { @@ -7867,7 +7867,7 
@@ LIMIT 100""" ,w_county ,w_state ,w_country - ,CONCAT('{SMC1}', ',', '{SMC2}') as ship_carriers + ,'{SMC1}' || ',' || '{SMC2}' as ship_carriers ,d_year as years ,sum(case when d_moy = 1 then {SALESTWO}* cs_quantity else 0 end) as jan_sales @@ -8084,7 +8084,7 @@ LIMIT 100""" ,w_county ,w_state ,w_country - ,CONCAT('{SMC1}', ',', '{SMC2}') as ship_carriers + ,'{SMC1}' || ',' || '{SMC2}' as ship_carriers ,d_year as years ,sum(case when d_moy = 1 then {SALESTWO}* cs_quantity else 0 end) as jan_sales @@ -8562,7 +8562,7 @@ LIMIT 100""" and s_state in ( select s_state from (select s_state as s_state, - rank() over ( partition by s_state order by sum(ss_net_profit) desc) as ranking + rank() over ( partition by s_state order by sum(ss_net_profit) is not null, sum(ss_net_profit) desc) as ranking from store_sales, store, date_dim where d_month_seq between {DMS} and {DMS}+11 and d_date_sk = ss_sold_date_sk @@ -8598,7 +8598,7 @@ WHERE SELECT s_state FROM ( SELECT s_state, - RANK() OVER (PARTITION BY s_state ORDER BY SUM(ss_net_profit) DESC) AS ranking + RANK() OVER (PARTITION BY s_state ORDER BY SUM(ss_net_profit) is not null, SUM(ss_net_profit) DESC) AS ranking FROM store_sales JOIN store ON s_store_sk = ss_store_sk JOIN date_dim ON d_date_sk = ss_sold_date_sk @@ -8631,7 +8631,7 @@ WHERE SELECT s_state FROM ( SELECT s_state, - RANK() OVER (PARTITION BY s_state ORDER BY SUM(ss_net_profit) DESC) AS ranking + RANK() OVER (PARTITION BY s_state ORDER BY SUM(ss_net_profit) is not null, SUM(ss_net_profit) DESC) AS ranking FROM store_sales JOIN store ON s_store_sk = ss_store_sk JOIN date_dim ON d_date_sk = ss_sold_date_sk @@ -8661,7 +8661,7 @@ WHERE SELECT s_state FROM ( SELECT s_state, - RANK() OVER (PARTITION BY s_state ORDER BY SUM(ss_net_profit) DESC) AS ranking + RANK() OVER (PARTITION BY s_state ORDER BY SUM(ss_net_profit) is not null, SUM(ss_net_profit) DESC) AS ranking FROM store_sales JOIN store ON s_store_sk = ss_store_sk JOIN date_dim ON d_date_sk = ss_sold_date_sk @@ -8683,7 +8683,7 @@ LIMIT 100""", ,rank() over ( partition by grouping(s_state)+grouping(s_county), case when grouping(s_county) = 0 then s_state end - order by sum(ss_net_profit) desc) is not null, sum(ss_net_profit) desc) as rank_within_parent + order by sum(ss_net_profit) is not null, sum(ss_net_profit) desc) as rank_within_parent from store_sales ,date_dim d1 @@ -8695,7 +8695,7 @@ LIMIT 100""", and s_state in ( select s_state from (select s_state as s_state, - rank() over ( partition by s_state order by sum(ss_net_profit) desc) as ranking + rank() over ( partition by s_state order by sum(ss_net_profit) is not null, sum(ss_net_profit) desc) as ranking from store_sales, store, date_dim where d_month_seq between {DMS} and {DMS}+11 and d_date_sk = ss_sold_date_sk @@ -8718,7 +8718,7 @@ LIMIT 100""", ,rank() over ( partition by grouping(s_state)+grouping(s_county), case when grouping(s_county) = 0 then s_state end - order by sum(ss_net_profit) desc) as rank_within_parent + order by sum(ss_net_profit) is not null, sum(ss_net_profit) desc) as rank_within_parent from store_sales ,date_dim d1 @@ -8730,7 +8730,7 @@ LIMIT 100""", and s_state in ( select s_state from (select s_state as s_state, - rank() over ( partition by s_state order by sum(ss_net_profit) desc) as ranking + rank() over ( partition by s_state order by sum(ss_net_profit) is not null, sum(ss_net_profit) desc) as ranking from store_sales, store, date_dim where d_month_seq between {DMS} and {DMS}+11 and d_date_sk = ss_sold_date_sk @@ -8805,7 +8805,7 @@ FROM ( SELECT s_state 
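+ -- ordering by SUM(ss_net_profit) IS NOT NULL before the value itself pins the
+ -- placement of NULL groups, which otherwise differs between DBMS (NULLS FIRST
+ -- vs NULLS LAST), so rankings and compared result sets stay deterministic.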
FROM ( SELECT s_state, - RANK() OVER (PARTITION BY s_state ORDER BY SUM(ss_net_profit) DESC) AS ranking + RANK() OVER (PARTITION BY s_state ORDER BY SUM(ss_net_profit) is not null, SUM(ss_net_profit) DESC) AS ranking FROM store_sales JOIN store ON s_store_sk = ss_store_sk JOIN date_dim ON d_date_sk = ss_sold_date_sk @@ -10116,7 +10116,7 @@ LIMIT 100""" 'query': """with ssr as (select s_store_id as store_id, sum(ss_ext_sales_price) as sales, - sum(coalesce(sr_return_amt, 0)) as returns, + sum(coalesce(sr_return_amt, 0)) as returns_amt, sum(ss_net_profit - coalesce(sr_net_loss, 0)) as profit from store_sales left outer join store_returns on (ss_item_sk = sr_item_sk and ss_ticket_number = sr_ticket_number), @@ -10137,7 +10137,7 @@ LIMIT 100""" csr as (select cp_catalog_page_id as catalog_page_id, sum(cs_ext_sales_price) as sales, - sum(coalesce(cr_return_amount, 0)) as returns, + sum(coalesce(cr_return_amount, 0)) as returns_amt, sum(cs_net_profit - coalesce(cr_net_loss, 0)) as profit from catalog_sales left outer join catalog_returns on (cs_item_sk = cr_item_sk and cs_order_number = cr_order_number), @@ -10158,7 +10158,7 @@ LIMIT 100""" wsr as (select web_site_id, sum(ws_ext_sales_price) as sales, - sum(coalesce(wr_return_amt, 0)) as returns, + sum(coalesce(wr_return_amt, 0)) as returns_amt, sum(ws_net_profit - coalesce(wr_net_loss, 0)) as profit from web_sales left outer join web_returns on (ws_item_sk = wr_item_sk and ws_order_number = wr_order_number), @@ -10178,27 +10178,27 @@ LIMIT 100""" select channel , id , sum(sales) as sales - , sum(returns) as returns + , sum(returns_amt) as returns_amt , sum(profit) as profit from (select 'store channel' as channel , 'store' || store_id as id , sales - , returns + , returns_amt , profit from ssr union all select 'catalog channel' as channel , 'catalog_page' || catalog_page_id as id , sales - , returns + , returns_amt , profit from csr union all select 'web channel' as channel , 'web_site' || web_site_id as id , sales - , returns + , returns_amt , profit from wsr ) x @@ -10210,7 +10210,7 @@ LIMIT 100""" 'MariaDB': """with total as (with ssr as (select s_store_id as store_id, sum(ss_ext_sales_price) as sales, - sum(coalesce(sr_return_amt, 0)) as returns_angepasst, + sum(coalesce(sr_return_amt, 0)) as returns_amt, sum(ss_net_profit - coalesce(sr_net_loss, 0)) as profit from store_sales left outer join store_returns on (ss_item_sk = sr_item_sk and ss_ticket_number = sr_ticket_number), @@ -10231,7 +10231,7 @@ LIMIT 100""" csr as (select cp_catalog_page_id as catalog_page_id, sum(cs_ext_sales_price) as sales, - sum(coalesce(cr_return_amount, 0)) as returns_angepasst, + sum(coalesce(cr_return_amount, 0)) as returns_amt, sum(cs_net_profit - coalesce(cr_net_loss, 0)) as profit from catalog_sales left outer join catalog_returns on (cs_item_sk = cr_item_sk and cs_order_number = cr_order_number), @@ -10252,7 +10252,7 @@ LIMIT 100""" wsr as (select web_site_id, sum(ws_ext_sales_price) as sales, - sum(coalesce(wr_return_amt, 0)) as returns_angepasst, + sum(coalesce(wr_return_amt, 0)) as returns_amt, sum(ws_net_profit - coalesce(wr_net_loss, 0)) as profit from web_sales left outer join web_returns on (ws_item_sk = wr_item_sk and ws_order_number = wr_order_number), @@ -10272,27 +10272,27 @@ LIMIT 100""" select channel , id , sum(sales) as sales - , sum(returns_angepasst) as returns_angepasst + , sum(returns_amt) as returns_amt , sum(profit) as profit from (select 'store channel' as channel , 'store' || store_id as id , sales - , returns_angepasst + , returns_amt 
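+ -- 'returns' is a reserved word in several targeted DBMS (e.g. MySQL/MariaDB),
+ -- so the column is aliased 'returns_amt' in every dialect of this query,
+ -- replacing the older ad-hoc alias 'returns_angepasst'.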
, profit from ssr union all select 'catalog channel' as channel , 'catalog_page' || catalog_page_id as id , sales - , returns_angepasst + , returns_amt , profit from csr union all select 'web channel' as channel , 'web_site' || web_site_id as id , sales - , returns_angepasst + , returns_amt , profit from wsr ) x @@ -10304,7 +10304,7 @@ LIMIT 100""" 'MonetDB': """with ssr as (select s_store_id as store_id, sum(ss_ext_sales_price) as sales, - sum(coalesce(sr_return_amt, 0)) as returns_angepasst, + sum(coalesce(sr_return_amt, 0)) as returns_amt, sum(ss_net_profit - coalesce(sr_net_loss, 0)) as profit from store_sales left outer join store_returns on (ss_item_sk = sr_item_sk and ss_ticket_number = sr_ticket_number), @@ -10325,7 +10325,7 @@ LIMIT 100""" csr as (select cp_catalog_page_id as catalog_page_id, sum(cs_ext_sales_price) as sales, - sum(coalesce(cr_return_amount, 0)) as returns_angepasst, + sum(coalesce(cr_return_amount, 0)) as returns_amt, sum(cs_net_profit - coalesce(cr_net_loss, 0)) as profit from catalog_sales left outer join catalog_returns on (cs_item_sk = cr_item_sk and cs_order_number = cr_order_number), @@ -10346,7 +10346,7 @@ LIMIT 100""" wsr as (select web_site_id, sum(ws_ext_sales_price) as sales, - sum(coalesce(wr_return_amt, 0)) as returns_angepasst, + sum(coalesce(wr_return_amt, 0)) as returns_amt, sum(ws_net_profit - coalesce(wr_net_loss, 0)) as profit from web_sales left outer join web_returns on (ws_item_sk = wr_item_sk and ws_order_number = wr_order_number), @@ -10366,27 +10366,27 @@ LIMIT 100""" select channel , id , sum(sales) as sales - , sum(returns_angepasst) as returns_angepasst + , sum(returns_amt) as returns_amt , sum(profit) as profit from (select 'store channel' as channel , 'store' || store_id as id , sales - , returns_angepasst + , returns_amt , profit from ssr union all select 'catalog channel' as channel , 'catalog_page' || catalog_page_id as id , sales - , returns_angepasst + , returns_amt , profit from csr union all select 'web channel' as channel , 'web_site' || web_site_id as id , sales - , returns_angepasst + , returns_amt , profit from wsr ) x @@ -10397,7 +10397,7 @@ LIMIT 100""" 'PostgreSQL': """with ssr as (select s_store_id as store_id, sum(ss_ext_sales_price) as sales, - sum(coalesce(sr_return_amt, 0)) as returns_angepasst, + sum(coalesce(sr_return_amt, 0)) as returns_amt, sum(ss_net_profit - coalesce(sr_net_loss, 0)) as profit from store_sales left outer join store_returns on (ss_item_sk = sr_item_sk and ss_ticket_number = sr_ticket_number), @@ -10418,7 +10418,7 @@ LIMIT 100""" csr as (select cp_catalog_page_id as catalog_page_id, sum(cs_ext_sales_price) as sales, - sum(coalesce(cr_return_amount, 0)) as returns_angepasst, + sum(coalesce(cr_return_amount, 0)) as returns_amt, sum(cs_net_profit - coalesce(cr_net_loss, 0)) as profit from catalog_sales left outer join catalog_returns on (cs_item_sk = cr_item_sk and cs_order_number = cr_order_number), @@ -10439,7 +10439,7 @@ LIMIT 100""" wsr as (select web_site_id, sum(ws_ext_sales_price) as sales, - sum(coalesce(wr_return_amt, 0)) as returns_angepasst, + sum(coalesce(wr_return_amt, 0)) as returns_amt, sum(ws_net_profit - coalesce(wr_net_loss, 0)) as profit from web_sales left outer join web_returns on (ws_item_sk = wr_item_sk and ws_order_number = wr_order_number), @@ -10459,27 +10459,27 @@ LIMIT 100""" select channel , id , sum(sales) as sales - , sum(returns_angepasst) as returns_angepasst + , sum(returns_amt) as returns_amt , sum(profit) as profit from (select 'store channel' as channel , 
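+ -- '||' is the SQL-standard string concatenation operator; CONCAT() with more
+ -- than two arguments is not available in all targeted DBMS.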
'store' || store_id as id , sales - , returns_angepasst + , returns_amt , profit from ssr union all select 'catalog channel' as channel , 'catalog_page' || catalog_page_id as id , sales - , returns_angepasst + , returns_amt , profit from csr union all select 'web channel' as channel , 'web_site' || web_site_id as id , sales - , returns_angepasst + , returns_amt , profit from wsr ) x @@ -10490,7 +10490,7 @@ LIMIT 100""" 'Exasol': """with ssr as (select s_store_id as store_id, sum(ss_ext_sales_price) as sales, - sum(coalesce(sr_return_amt, 0)) as returns_angepasst, + sum(coalesce(sr_return_amt, 0)) as returns_amt, sum(ss_net_profit - coalesce(sr_net_loss, 0)) as profit from store_sales left outer join store_returns on (ss_item_sk = sr_item_sk and ss_ticket_number = sr_ticket_number), @@ -10511,7 +10511,7 @@ LIMIT 100""" csr as (select cp_catalog_page_id as catalog_page_id, sum(cs_ext_sales_price) as sales, - sum(coalesce(cr_return_amount, 0)) as returns_angepasst, + sum(coalesce(cr_return_amount, 0)) as returns_amt, sum(cs_net_profit - coalesce(cr_net_loss, 0)) as profit from catalog_sales left outer join catalog_returns on (cs_item_sk = cr_item_sk and cs_order_number = cr_order_number), @@ -10532,7 +10532,7 @@ LIMIT 100""" wsr as (select web_site_id, sum(ws_ext_sales_price) as sales, - sum(coalesce(wr_return_amt, 0)) as returns_angepasst, + sum(coalesce(wr_return_amt, 0)) as returns_amt, sum(ws_net_profit - coalesce(wr_net_loss, 0)) as profit from web_sales left outer join web_returns on (ws_item_sk = wr_item_sk and ws_order_number = wr_order_number), @@ -10552,27 +10552,27 @@ LIMIT 100""" select channel , id , sum(sales) as sales - , sum(returns_angepasst) as returns_angepasst + , sum(returns_amt) as returns_amt , sum(profit) as profit from (select 'store channel' as channel , 'store' || store_id as id , sales - , returns_angepasst + , returns_amt , profit from ssr union all select 'catalog channel' as channel , 'catalog_page' || catalog_page_id as id , sales - , returns_angepasst + , returns_amt , profit from csr union all select 'web channel' as channel , 'web_site' || web_site_id as id , sales - , returns_angepasst + , returns_amt , profit from wsr ) x @@ -10583,7 +10583,7 @@ LIMIT 100""" 'MemSQL': """with ssr as (select s_store_id as store_id, sum(ss_ext_sales_price) as sales, - sum(coalesce(sr_return_amt, 0)) as returns_angepasst, + sum(coalesce(sr_return_amt, 0)) as returns_amt, sum(ss_net_profit - coalesce(sr_net_loss, 0)) as profit from store_sales left outer join store_returns on (ss_item_sk = sr_item_sk and ss_ticket_number = sr_ticket_number), @@ -10604,7 +10604,7 @@ LIMIT 100""" csr as (select cp_catalog_page_id as catalog_page_id, sum(cs_ext_sales_price) as sales, - sum(coalesce(cr_return_amount, 0)) as returns_angepasst, + sum(coalesce(cr_return_amount, 0)) as returns_amt, sum(cs_net_profit - coalesce(cr_net_loss, 0)) as profit from catalog_sales left outer join catalog_returns on (cs_item_sk = cr_item_sk and cs_order_number = cr_order_number), @@ -10625,7 +10625,7 @@ LIMIT 100""" wsr as (select web_site_id, sum(ws_ext_sales_price) as sales, - sum(coalesce(wr_return_amt, 0)) as returns_angepasst, + sum(coalesce(wr_return_amt, 0)) as returns_amt, sum(ws_net_profit - coalesce(wr_net_loss, 0)) as profit from web_sales left outer join web_returns on (ws_item_sk = wr_item_sk and ws_order_number = wr_order_number), @@ -10645,27 +10645,27 @@ LIMIT 100""" select channel , id , sum(sales) as sales - , sum(returns_angepasst) as returns_angepasst + , sum(returns_amt) as returns_amt , 
sum(profit) as profit from (select 'store channel' as channel , 'store' || store_id as id , sales - , returns_angepasst + , returns_amt , profit from ssr union all select 'catalog channel' as channel , 'catalog_page' || catalog_page_id as id , sales - , returns_angepasst + , returns_amt , profit from csr union all select 'web channel' as channel , 'web_site' || web_site_id as id , sales - , returns_angepasst + , returns_amt , profit from wsr ) x @@ -12011,52 +12011,54 @@ LIMIT 100""", limit 100""", 'DBMS': { 'MySQL': """WITH ssci AS ( - SELECT - ss_customer_sk AS customer_sk, - ss_item_sk AS item_sk - FROM store_sales, date_dim - WHERE ss_sold_date_sk = d_date_sk - AND d_month_seq BETWEEN {DMS} AND {DMS} + 11 - GROUP BY ss_customer_sk, ss_item_sk -), -csci AS ( - SELECT - cs_bill_customer_sk AS customer_sk, - cs_item_sk AS item_sk - FROM catalog_sales, date_dim - WHERE cs_sold_date_sk = d_date_sk - AND d_month_seq BETWEEN {DMS} AND {DMS} + 11 - GROUP BY cs_bill_customer_sk, cs_item_sk -), -combined AS ( - SELECT - customer_sk, - item_sk, - 1 AS is_store, - 0 AS is_catalog - FROM ssci - UNION ALL - SELECT - customer_sk, - item_sk, - 0 AS is_store, - 1 AS is_catalog - FROM csci -) -SELECT - SUM(CASE WHEN is_store = 1 AND is_catalog = 0 THEN 1 ELSE 0 END) AS store_only, - SUM(CASE WHEN is_store = 0 AND is_catalog = 1 THEN 1 ELSE 0 END) AS catalog_only, - SUM(CASE WHEN is_store = 1 AND is_catalog = 1 THEN 1 ELSE 0 END) AS store_and_catalog -FROM ( - SELECT - customer_sk, - item_sk, - MAX(is_store) AS is_store, - MAX(is_catalog) AS is_catalog - FROM combined - GROUP BY customer_sk, item_sk -) AS summary -LIMIT 100""", + SELECT + ss_customer_sk AS customer_sk, + ss_item_sk AS item_sk + FROM store_sales, date_dim + WHERE ss_sold_date_sk = d_date_sk + AND d_month_seq BETWEEN {DMS} AND {DMS} + 11 + GROUP BY ss_customer_sk, ss_item_sk + ), + csci AS ( + SELECT + cs_bill_customer_sk AS customer_sk, + cs_item_sk AS item_sk + FROM catalog_sales, date_dim + WHERE cs_sold_date_sk = d_date_sk + AND d_month_seq BETWEEN {DMS} AND {DMS} + 11 + GROUP BY cs_bill_customer_sk, cs_item_sk + ), + combined AS ( + SELECT + customer_sk, + item_sk, + 1 AS is_store, + 0 AS is_catalog + FROM ssci + WHERE customer_sk IS NOT NULL AND item_sk IS NOT NULL + UNION ALL + SELECT + customer_sk, + item_sk, + 0 AS is_store, + 1 AS is_catalog + FROM csci + WHERE customer_sk IS NOT NULL AND item_sk IS NOT NULL + ) + SELECT + SUM(CASE WHEN is_store = 1 AND is_catalog = 0 THEN 1 ELSE 0 END) AS store_only, + SUM(CASE WHEN is_store = 0 AND is_catalog = 1 THEN 1 ELSE 0 END) AS catalog_only, + SUM(CASE WHEN is_store = 1 AND is_catalog = 1 THEN 1 ELSE 0 END) AS store_and_catalog + FROM ( + SELECT + customer_sk, + item_sk, + MAX(is_store) AS is_store, + MAX(is_catalog) AS is_catalog + FROM combined + GROUP BY customer_sk, item_sk + ) AS summary + LIMIT 100""", }, 'parameter': { diff --git a/experiments/tpch/DatabaseService/initconstraints-tpch.sql b/experiments/tpch/DatabaseService/initconstraints-tpch.sql new file mode 100644 index 000000000..feb983b0f --- /dev/null +++ b/experiments/tpch/DatabaseService/initconstraints-tpch.sql @@ -0,0 +1,78 @@ +-- sccsid: @(#)dss.ri 2.1.8.1 +-- tpcd benchmark version 8.0 + +-- for table region +alter table public.region +add primary key (r_regionkey); + +-- for table nation +alter table public.nation +add primary key (n_nationkey); + +-- for table part +alter table public.part +add primary key (p_partkey); + +-- for table supplier +alter table public.supplier +add primary key (s_suppkey); + +-- for 
table partsupp +alter table public.partsupp +add primary key (ps_partkey,ps_suppkey); + +-- for table customer +alter table public.customer +add primary key (c_custkey); + +-- for table lineitem +alter table public.lineitem +add primary key (l_orderkey,l_linenumber); + +-- for table orders +alter table public.orders +add primary key (o_orderkey); + + + + +-- for table nation +alter table public.nation +add foreign key (n_regionkey) references region(r_regionkey); + +-- for table supplier +-- alter table public.supplier +-- add foreign key (s_nationkey) references nation(n_nationkey); + +-- for table customer +alter table public.customer +add foreign key (c_nationkey) references nation(n_nationkey); + +-- for table partsupp +alter table public.partsupp +add foreign key (ps_suppkey) references supplier(s_suppkey); + +alter table public.partsupp +add foreign key (ps_partkey) references part(p_partkey); + +-- for table orders +alter table public.orders +add foreign key (o_custkey) references customer(c_custkey); + +-- for table lineitem +alter table public.lineitem +add foreign key (l_orderkey) references orders(o_orderkey); + +alter table public.lineitem +add foreign key (l_partkey) references + part(p_partkey); + +alter table public.lineitem +add foreign key (l_suppkey) references + supplier(s_suppkey); + +alter table public.lineitem +add foreign key (l_partkey,l_suppkey) references + partsupp(ps_partkey,ps_suppkey); + + diff --git a/experiments/tpch/DatabaseService/initdata-tpch-SF1.sql b/experiments/tpch/DatabaseService/initdata-tpch-SF1.sql new file mode 100644 index 000000000..ab436f954 --- /dev/null +++ b/experiments/tpch/DatabaseService/initdata-tpch-SF1.sql @@ -0,0 +1,8 @@ +COPY public.customer FROM '/data/tpch/SF1/customer.tbl' DELIMITER '|' null ''; +COPY public.lineitem FROM '/data/tpch/SF1/lineitem.tbl' DELIMITER '|' null ''; +COPY public.nation FROM '/data/tpch/SF1/nation.tbl' DELIMITER '|' null ''; +COPY public.orders FROM '/data/tpch/SF1/orders.tbl' DELIMITER '|' null ''; +COPY public.part FROM '/data/tpch/SF1/part.tbl' DELIMITER '|' null ''; +COPY public.partsupp FROM '/data/tpch/SF1/partsupp.tbl' DELIMITER '|' null ''; +COPY public.region FROM '/data/tpch/SF1/region.tbl' DELIMITER '|' null ''; +COPY public.supplier FROM '/data/tpch/SF1/supplier.tbl' DELIMITER '|' null ''; diff --git a/experiments/tpch/DatabaseService/initdata-tpch-SF10.sql b/experiments/tpch/DatabaseService/initdata-tpch-SF10.sql new file mode 100644 index 000000000..5c3bb8503 --- /dev/null +++ b/experiments/tpch/DatabaseService/initdata-tpch-SF10.sql @@ -0,0 +1,8 @@ +COPY public.customer FROM '/data/tpch/SF10/customer.tbl' DELIMITER '|' null ''; +COPY public.lineitem FROM '/data/tpch/SF10/lineitem.tbl' DELIMITER '|' null ''; +COPY public.nation FROM '/data/tpch/SF10/nation.tbl' DELIMITER '|' null ''; +COPY public.orders FROM '/data/tpch/SF10/orders.tbl' DELIMITER '|' null ''; +COPY public.part FROM '/data/tpch/SF10/part.tbl' DELIMITER '|' null ''; +COPY public.partsupp FROM '/data/tpch/SF10/partsupp.tbl' DELIMITER '|' null ''; +COPY public.region FROM '/data/tpch/SF10/region.tbl' DELIMITER '|' null ''; +COPY public.supplier FROM '/data/tpch/SF10/supplier.tbl' DELIMITER '|' null ''; diff --git a/experiments/tpch/DatabaseService/initdata-tpch-SF100.sql b/experiments/tpch/DatabaseService/initdata-tpch-SF100.sql new file mode 100644 index 000000000..f83272dca --- /dev/null +++ b/experiments/tpch/DatabaseService/initdata-tpch-SF100.sql @@ -0,0 +1,8 @@ +COPY public.customer FROM '/data/tpch/SF100/customer.tbl' 
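+-- note: COPY ... FROM reads the .tbl files from the filesystem of the database
+-- server itself, so /data/tpch must be mounted into the DBMS instance, not the
+-- benchmark client.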
DELIMITER '|' null ''; +COPY public.lineitem FROM '/data/tpch/SF100/lineitem.tbl' DELIMITER '|' null ''; +COPY public.nation FROM '/data/tpch/SF100/nation.tbl' DELIMITER '|' null ''; +COPY public.orders FROM '/data/tpch/SF100/orders.tbl' DELIMITER '|' null ''; +COPY public.part FROM '/data/tpch/SF100/part.tbl' DELIMITER '|' null ''; +COPY public.partsupp FROM '/data/tpch/SF100/partsupp.tbl' DELIMITER '|' null ''; +COPY public.region FROM '/data/tpch/SF100/region.tbl' DELIMITER '|' null ''; +COPY public.supplier FROM '/data/tpch/SF100/supplier.tbl' DELIMITER '|' null ''; diff --git a/experiments/tpch/DatabaseService/initdata-tpch-SF30.sql b/experiments/tpch/DatabaseService/initdata-tpch-SF30.sql new file mode 100644 index 000000000..70c9d10b7 --- /dev/null +++ b/experiments/tpch/DatabaseService/initdata-tpch-SF30.sql @@ -0,0 +1,8 @@ +COPY public.customer FROM '/data/tpch/SF30/customer.tbl' DELIMITER '|' null ''; +COPY public.lineitem FROM '/data/tpch/SF30/lineitem.tbl' DELIMITER '|' null ''; +COPY public.nation FROM '/data/tpch/SF30/nation.tbl' DELIMITER '|' null ''; +COPY public.orders FROM '/data/tpch/SF30/orders.tbl' DELIMITER '|' null ''; +COPY public.part FROM '/data/tpch/SF30/part.tbl' DELIMITER '|' null ''; +COPY public.partsupp FROM '/data/tpch/SF30/partsupp.tbl' DELIMITER '|' null ''; +COPY public.region FROM '/data/tpch/SF30/region.tbl' DELIMITER '|' null ''; +COPY public.supplier FROM '/data/tpch/SF30/supplier.tbl' DELIMITER '|' null ''; diff --git a/experiments/tpch/DatabaseService/initindexes-tpch.sql b/experiments/tpch/DatabaseService/initindexes-tpch.sql new file mode 100644 index 000000000..94983b632 --- /dev/null +++ b/experiments/tpch/DatabaseService/initindexes-tpch.sql @@ -0,0 +1,56 @@ +-- indexes for foreign keys + +-- for table region +-- alter table public.region +-- add primary key (r_regionkey); + +-- for table nation +-- alter table public.nation +-- add primary key (n_nationkey); + +create index on public.nation (n_regionkey); + +-- for table part +-- alter table public.part +-- add primary key (p_partkey); + +-- for table supplier +-- alter table public.supplier +-- add primary key (s_suppkey); + +-- create index on public.supplier (s_nationkey); + +-- for table partsupp +-- alter table public.partsupp +-- add primary key (ps_partkey,ps_suppkey); + +-- for table customer +-- alter table public.customer +-- add primary key (c_custkey); + +create index on public.customer (c_nationkey); + +-- for table partsupp +create index on public.partsupp (ps_suppkey); + +create index on public.partsupp (ps_partkey); + +-- for table lineitem +-- alter table public.lineitem +-- add primary key (l_orderkey,l_linenumber); + +-- for table orders +-- alter table public.orders +-- add primary key (o_orderkey); + +create index on public.orders (o_custkey); + +-- for table lineitem +create index on public.lineitem (l_orderkey); + +-- create index on public.lineitem (l_partkey); + +-- create index on public.lineitem (l_suppkey); + +create index on public.lineitem (l_partkey,l_suppkey); + diff --git a/experiments/tpch/DatabaseService/initschema-tpch.sql b/experiments/tpch/DatabaseService/initschema-tpch.sql new file mode 100644 index 000000000..082159c3d --- /dev/null +++ b/experiments/tpch/DatabaseService/initschema-tpch.sql @@ -0,0 +1,70 @@ +-- sccsid: @(#)dss.ddl 2.1.8.1 +create table public.nation ( n_nationkey integer not null, + n_name char(25) not null, + n_regionkey integer not null, + n_comment varchar(152)); + +create table public.region ( r_regionkey integer not null, + r_name 
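+-- the schema is created without keys or indexes; primary keys, foreign keys and
+-- indexes are added after loading (initconstraints-tpch.sql, initindexes-tpch.sql),
+-- which is usually much faster than loading into fully indexed tables.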
char(25) not null, + r_comment varchar(152)); + +create table public.part ( p_partkey integer not null, + p_name varchar(55) not null, + p_mfgr char(25) not null, + p_brand char(10) not null, + p_type varchar(25) not null, + p_size integer not null, + p_container char(10) not null, + p_retailprice decimal(15,2) not null, + p_comment varchar(23) not null ); + +create table public.supplier ( s_suppkey integer not null, + s_name char(25) not null, + s_address varchar(40) not null, + s_nationkey integer not null, + s_phone char(15) not null, + s_acctbal decimal(15,2) not null, + s_comment varchar(101) not null); + +create table public.partsupp ( ps_partkey integer not null, + ps_suppkey integer not null, + ps_availqty integer not null, + ps_supplycost decimal(15,2) not null, + ps_comment varchar(199) not null ); + +create table public.customer ( c_custkey integer not null, + c_name varchar(25) not null, + c_address varchar(40) not null, + c_nationkey integer not null, + c_phone char(15) not null, + c_acctbal decimal(15,2) not null, + c_mktsegment char(10) not null, + c_comment varchar(117) not null); + +create table public.orders ( o_orderkey integer not null, + o_custkey integer not null, + o_orderstatus char(1) not null, + o_totalprice decimal(15,2) not null, + o_orderdate date not null, + o_orderpriority char(15) not null, + o_clerk char(15) not null, + o_shippriority integer not null, + o_comment varchar(79) not null); + +create table public.lineitem ( l_orderkey integer not null, + l_partkey integer not null, + l_suppkey integer not null, + l_linenumber integer not null, + l_quantity decimal(15,2) not null, + l_extendedprice decimal(15,2) not null, + l_discount decimal(15,2) not null, + l_tax decimal(15,2) not null, + l_returnflag char(1) not null, + l_linestatus char(1) not null, + l_shipdate date not null, + l_commitdate date not null, + l_receiptdate date not null, + l_shipinstruct char(25) not null, + l_shipmode char(10) not null, + l_comment varchar(44) not null); + diff --git a/experiments/tpch/DatabaseService/initschemadummy-tpch.sql b/experiments/tpch/DatabaseService/initschemadummy-tpch.sql new file mode 100644 index 000000000..98ed59008 --- /dev/null +++ b/experiments/tpch/DatabaseService/initschemadummy-tpch.sql @@ -0,0 +1,78 @@ +-- sccsid: @(#)dss.ddl 2.1.8.1 +create table public.nation ( n_nationkey integer not null, + n_name char(25) not null, + n_regionkey integer not null, + n_comment varchar(152), + dummy char(1)); + +create table public.region ( r_regionkey integer not null, + r_name char(25) not null, + r_comment varchar(152), + dummy char(1)); + +create table public.part ( p_partkey integer not null, + p_name varchar(55) not null, + p_mfgr char(25) not null, + p_brand char(10) not null, + p_type varchar(25) not null, + p_size integer not null, + p_container char(10) not null, + p_retailprice decimal(15,2) not null, + p_comment varchar(23) not null , + dummy char(1)); + +create table public.supplier ( s_suppkey integer not null, + s_name char(25) not null, + s_address varchar(40) not null, + s_nationkey integer not null, + s_phone char(15) not null, + s_acctbal decimal(15,2) not null, + s_comment varchar(101) not null, + dummy char(1)); + +create table public.partsupp ( ps_partkey integer not null, + ps_suppkey integer not null, + ps_availqty integer not null, + ps_supplycost decimal(15,2) not null, + ps_comment varchar(199) not null , + dummy char(1)); + +create table public.customer ( c_custkey integer not null, + c_name varchar(25) not null, + c_address varchar(40) 
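+-- the trailing 'dummy char(1)' column in each table absorbs the extra '|'
+-- field terminator that dbgen writes at the end of every line, for loaders
+-- that cannot be told to ignore it.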
not null, + c_nationkey integer not null, + c_phone char(15) not null, + c_acctbal decimal(15,2) not null, + c_mktsegment char(10) not null, + c_comment varchar(117) not null, + dummy char(1)); + +create table public.orders ( o_orderkey integer not null, + o_custkey integer not null, + o_orderstatus char(1) not null, + o_totalprice decimal(15,2) not null, + o_orderdate date not null, + o_orderpriority char(15) not null, + o_clerk char(15) not null, + o_shippriority integer not null, + o_comment varchar(79) not null, + dummy char(1)); + +create table public.lineitem ( l_orderkey integer not null, + l_partkey integer not null, + l_suppkey integer not null, + l_linenumber integer not null, + l_quantity decimal(15,2) not null, + l_extendedprice decimal(15,2) not null, + l_discount decimal(15,2) not null, + l_tax decimal(15,2) not null, + l_returnflag char(1) not null, + l_linestatus char(1) not null, + l_shipdate date not null, + l_commitdate date not null, + l_receiptdate date not null, + l_shipinstruct char(25) not null, + l_shipmode char(10) not null, + l_comment varchar(44) not null, + dummy char(1)); + diff --git a/experiments/tpch/DatabaseService/initstatistics-tpch.sql b/experiments/tpch/DatabaseService/initstatistics-tpch.sql new file mode 100644 index 000000000..b36fc1dd0 --- /dev/null +++ b/experiments/tpch/DatabaseService/initstatistics-tpch.sql @@ -0,0 +1,8 @@ +ANALYZE VERBOSE public.customer; +ANALYZE VERBOSE public.lineitem; +ANALYZE VERBOSE public.nation; +ANALYZE VERBOSE public.orders; +ANALYZE VERBOSE public.part; +ANALYZE VERBOSE public.partsupp; +ANALYZE VERBOSE public.region; +ANALYZE VERBOSE public.supplier; diff --git a/experiments/ycsb/DatabaseService/initschema-ycsb.sql b/experiments/ycsb/DatabaseService/initschema-ycsb.sql new file mode 100644 index 000000000..1224e7996 --- /dev/null +++ b/experiments/ycsb/DatabaseService/initschema-ycsb.sql @@ -0,0 +1,14 @@ + +-- SELECT "CREATE new usertable"; +CREATE TABLE IF NOT EXISTS usertable ( + YCSB_KEY VARCHAR(255) PRIMARY KEY, + FIELD0 TEXT, FIELD1 TEXT, FIELD2 TEXT, FIELD3 TEXT, + FIELD4 TEXT, FIELD5 TEXT, FIELD6 TEXT, FIELD7 TEXT, + FIELD8 TEXT, FIELD9 TEXT); + + +SELECT current_timestamp AS "Time after creation"; + + +SELECT COUNT(*) AS "Number of rows in usertable" FROM usertable; + diff --git a/experiments/ycsb/YugabyteDB/initschema-ycsb.sql b/experiments/ycsb/YugabyteDB/initschema-ycsb.sql index d92b7a53d..1bbcb9dd5 100644 --- a/experiments/ycsb/YugabyteDB/initschema-ycsb.sql +++ b/experiments/ycsb/YugabyteDB/initschema-ycsb.sql @@ -5,7 +5,7 @@ -- SELECT 'DROP old usertable' as message; -DROP TABLE IF EXISTS public.usertable CASCADE; +-- DROP TABLE IF EXISTS public.usertable CASCADE; -- wait 60 seconds -- SELECT 'Wait 60 s'; diff --git a/experiments/ycsb/queries.config b/experiments/ycsb/queries.config index bf3c7c325..ef4063dd6 100644 --- a/experiments/ycsb/queries.config +++ b/experiments/ycsb/queries.config @@ -1,6 +1,6 @@ { 'name': "YCSB Queries", - 'intro': "This includes no queries. 
YCSB runs the benchmark", + 'intro': "YCSB tool runs the benchmark.", 'factor': 'mean', 'connectionmanagement': { 'timeout': 600, diff --git a/images/benchmarker_dbmsbenchmarker/Dockerfile_template b/images/benchmarker_dbmsbenchmarker/Dockerfile_template index 8cde878d1..2fbf7f484 100644 --- a/images/benchmarker_dbmsbenchmarker/Dockerfile_template +++ b/images/benchmarker_dbmsbenchmarker/Dockerfile_template @@ -1,4 +1,4 @@ -FROM python:3.12.5 +FROM python:3.12.8 # does not compile numpy correctly # FROM python:3.13-rc-slim @@ -12,6 +12,7 @@ ENV DBMSBENCHMARKER_DEV 0 ENV DBMSBENCHMARKER_NOW 0 ENV DBMSBENCHMARKER_START 0 ENV DBMSBENCHMARKER_SHUFFLE_QUERIES False +ENV DBMSBENCHMARKER_TESTRUN 0 RUN apt update RUN apt install default-jre -y @@ -59,10 +60,8 @@ RUN cp mariadb-java-client-3.1.0.jar jars/mariadb-java-client-3.1.0.jar ######### Specific version of MonetDB JDBC ######### #RUN wget https://www.monetdb.org/downloads/Java/archive/monetdb-jdbc-3.2.jre8.jar --no-check-certificate #RUN cp monetdb-jdbc-3.2.jre8.jar jars/monetdb-jdbc-3.2.jre8.jar -RUN wget https://www.monetdb.org/downloads/Java/archive/monetdb-jdbc-3.3.jre8.jar --no-check-certificate -RUN cp monetdb-jdbc-3.3.jre8.jar jars/monetdb-jdbc-3.3.jre8.jar - -######### Specific version of MonetDB JDBC ######### +#RUN wget https://www.monetdb.org/downloads/Java/archive/monetdb-jdbc-3.3.jre8.jar --no-check-certificate +#RUN cp monetdb-jdbc-3.3.jre8.jar jars/monetdb-jdbc-3.3.jre8.jar RUN wget https://www.monetdb.org/downloads/Java/monetdb-jdbc-3.3.jre8.jar --no-check-certificate RUN cp monetdb-jdbc-3.3.jre8.jar jars/monetdb-jdbc-3.3.jre8.jar @@ -85,6 +84,13 @@ RUN cp jdbc-yugabytedb-42.3.5-yb-2.jar jars/jdbc-yugabytedb-42.3.5-yb-2.jar RUN cd /tmp; wget http://download.redis.io/redis-stable.tar.gz; tar xvzf redis-stable.tar.gz; cd redis-stable; make; cp src/redis-cli /usr/local/bin/; chmod 755 /usr/local/bin/redis-cli +RUN mkdir -p tpc-ds + +COPY ./connections.config ./tpc-ds/connections.config +COPY ./queries.config ./tpc-ds/queries.config +RUN ["chmod", "+x", "./tpc-ds/connections.config"] +RUN ["chmod", "+x", "./tpc-ds/queries.config"] + COPY ./benchmarker.sh ./benchmarker.sh RUN ["chmod", "+x", "./benchmarker.sh"] diff --git a/images/benchmarker_dbmsbenchmarker/Dockerfile_template_alpine b/images/benchmarker_dbmsbenchmarker/Dockerfile_template_alpine new file mode 100644 index 000000000..c660e3d8e --- /dev/null +++ b/images/benchmarker_dbmsbenchmarker/Dockerfile_template_alpine @@ -0,0 +1,163 @@ +#FROM python:3.12.5 +FROM alpine:3.15 + +WORKDIR /usr/src/app + +ENV DBMSBENCHMARKER_SLEEP 30 +ENV DBMSBENCHMARKER_RECREATE_PARAMETER 0 +ENV DBMSBENCHMARKER_VERBOSE 0 +ENV DBMSBENCHMARKER_DEV 0 +ENV DBMSBENCHMARKER_NOW 0 +ENV DBMSBENCHMARKER_START 0 +ENV DBMSBENCHMARKER_SHUFFLE_QUERIES False + +# does not compile numpy correctly +# FROM python:3.13-rc-slim + +# Set environment variables for Python installation +ENV PYTHON_VERSION=3.12.8 +ENV PYTHON_SRC=https://www.python.org/ftp/python/$PYTHON_VERSION/Python-$PYTHON_VERSION.tgz + +# Install necessary packages +RUN apk add --no-cache \ + build-base \ + linux-headers \ + libffi-dev \ + openssl-dev \ + zlib-dev \ + bzip2-dev \ + xz-dev \ + readline-dev \ + sqlite-dev \ + ncurses-dev \ + util-linux-dev \ + libressl-dev \ + tar \ + curl + +# Download, extract, and compile Python +RUN curl -fSL $PYTHON_SRC -o /tmp/Python.tgz && \ + tar -xzf /tmp/Python.tgz -C /tmp && \ + cd /tmp/Python-$PYTHON_VERSION && \ + ./configure --enable-optimizations && \ + make && \ + make install && \ + rm -rf 
/tmp/Python.tgz /tmp/Python-$PYTHON_VERSION + +# Verify Python installation +RUN python3.12 --version && pip3 --version + +# Set default python and pip to Python 3.12 +RUN ln -sf /usr/local/bin/python3.12 /usr/local/bin/python && \ + ln -sf /usr/local/bin/pip3 /usr/local/bin/pip + +WORKDIR /usr/src/app + +# Update package index and install necessary packages +RUN apk update && apk add --no-cache \ + openjdk11-jre \ + zip \ + nano + +RUN mkdir /results + +RUN apk add --no-cache bash + +SHELL ["/bin/bash", "-c"] + +ENV VIRTUAL_ENV=/opt/venv + +# CMD sleep3600 + +RUN python -m pip install virtualenv +RUN python -m venv $VIRTUAL_ENV +ENV PATH="$VIRTUAL_ENV/bin:$PATH" + +RUN apk add --no-cache git +RUN JAVA_HOME=/usr/lib/jvm/java-1.8.0/ pip install --no-cache-dir --upgrade --force-reinstall git+https://github.com/Beuth-Erdelt/DBMS-Benchmarker@{version} +RUN git clone https://github.com/Beuth-Erdelt/DBMS-Benchmarker --branch {version} + +WORKDIR /usr/src/app/DBMS-Benchmarker + +ENV DBMSBENCHMARKER_SLEEP 30 +ENV DBMSBENCHMARKER_RECREATE_PARAMETER 0 +ENV DBMSBENCHMARKER_VERBOSE 0 +ENV DBMSBENCHMARKER_DEV 0 +ENV DBMSBENCHMARKER_NOW 0 +ENV DBMSBENCHMARKER_START 0 +ENV DBMSBENCHMARKER_SHUFFLE_QUERIES False + +RUN mkdir -p jars/ + +######### Specific version of PostgreSQL JDBC ######### +RUN wget https://jdbc.postgresql.org/download/postgresql-42.5.0.jar --no-check-certificate +RUN cp postgresql-42.5.0.jar jars/postgresql-42.5.0.jar + +######### Specific version of MySQL JDBC ######### +RUN wget https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-j-8.0.31.tar.gz +RUN tar -zxvf mysql-connector-j-8.0.31.tar.gz +RUN cp mysql-connector-j-8.0.31/mysql-connector-j-8.0.31.jar jars/mysql-connector-j-8.0.31.jar + +######### Specific version of MariaDB JDBC ######### +RUN wget https://dlm.mariadb.com/2678616/Connectors/java/connector-java-3.1.0/mariadb-java-client-3.1.0.jar +RUN cp mariadb-java-client-3.1.0.jar jars/mariadb-java-client-3.1.0.jar + +######### Specific version of MonetDB JDBC ######### +RUN wget https://www.monetdb.org/downloads/Java/archive/monetdb-jdbc-3.3.jre8.jar --no-check-certificate +RUN cp monetdb-jdbc-3.3.jre8.jar jars/monetdb-jdbc-3.3.jre8.jar + +######### Specific version of SingleStore JDBC ######### +RUN wget https://github.com/memsql/S2-JDBC-Connector/releases/download/v1.1.4/singlestore-jdbc-client-1.1.4.jar +RUN cp singlestore-jdbc-client-1.1.4.jar jars/singlestore-jdbc-client-1.1.4.jar + +######### Specific version of Kinetica JDBC ######### +RUN wget https://github.com/kineticadb/kinetica-client-jdbc/archive/refs/tags/v7.1.8.7.tar.gz +RUN tar -zxvf v7.1.8.7.tar.gz +RUN cp kinetica-client-jdbc-7.1.8.7/kinetica-jdbc-7.1.8.7-jar-with-dependencies.jar jars/kinetica-jdbc-7.1.8.7-jar-with-dependencies.jar + +######### Specific version of YugabyteDB JDBC ######### +RUN wget https://github.com/yugabyte/pgjdbc/releases/download/v42.3.5-yb-2/jdbc-yugabytedb-42.3.5-yb-2.jar +RUN cp jdbc-yugabytedb-42.3.5-yb-2.jar jars/jdbc-yugabytedb-42.3.5-yb-2.jar + + + +######### Redis Client - Download and compile ######### +RUN apk add --no-cache make cmake gcc libc-dev +RUN cd /tmp; wget http://download.redis.io/redis-stable.tar.gz; tar xvzf redis-stable.tar.gz; cd redis-stable; make; cp src/redis-cli /usr/local/bin/; chmod 755 /usr/local/bin/redis-cli + + +COPY ./benchmarker.sh ./benchmarker.sh +RUN ["chmod", "+x", "./benchmarker.sh"] + +RUN apk add gcompat + +RUN mkdir -p tpc-ds +COPY ./connections.config ./tpc-ds/connections.config +COPY ./queries.config ./tpc-ds/queries.config +RUN 
["chmod", "+x", "./tpc-ds/connections.config"] +RUN ["chmod", "+x", "./tpc-ds/queries.config"] + +CMD ["/bin/bash", "-c", "./benchmarker.sh"] + +#CMD git pull; python ./benchmark.py run -b -d -w connection \ +# -f /results/$DBMSBENCHMARKER_CODE \ +# -r /results/$DBMSBENCHMARKER_CODE \ +# -mps \ +# -cs -sf $DBMSBENCHMARKER_CONNECTION \ +# -ms $DBMSBENCHMARKER_CLIENT \ +# -sl $DBMSBENCHMARKER_SLEEP \ +# -st "$DBMSBENCHMARKER_START" \ +# -c "$DBMSBENCHMARKER_CONNECTION" \ +# -ca "$DBMSBENCHMARKER_ALIAS" \ +# -cf ${DBMSBENCHMARKER_CONNECTION}.config \ + +# -f config folder +# -r result folder +# -mps monitor per stream (not per query) +# -cs -sf subfolder per dbms (connection) +# -ms max number of subfolders +# -sl sleep seconds before start benchmarking +# -st start time for operating +# -c name of dbms (connection) to benchmark +# -ca alias for dbms (connection) to benchmark +# -cf config of dbms (connection) diff --git a/images/benchmarker_dbmsbenchmarker/Dockerfile_v0.14.6 b/images/benchmarker_dbmsbenchmarker/Dockerfile_v0.14.6 index 2b8f5854d..a0a15d54b 100644 --- a/images/benchmarker_dbmsbenchmarker/Dockerfile_v0.14.6 +++ b/images/benchmarker_dbmsbenchmarker/Dockerfile_v0.14.6 @@ -1,4 +1,4 @@ -FROM python:3.12.5 +FROM python:3.12.8 # does not compile numpy correctly # FROM python:3.13-rc-slim @@ -12,6 +12,7 @@ ENV DBMSBENCHMARKER_DEV 0 ENV DBMSBENCHMARKER_NOW 0 ENV DBMSBENCHMARKER_START 0 ENV DBMSBENCHMARKER_SHUFFLE_QUERIES False +ENV DBMSBENCHMARKER_TESTRUN 0 RUN apt update RUN apt install default-jre -y @@ -85,6 +86,13 @@ RUN cp jdbc-yugabytedb-42.3.5-yb-2.jar jars/jdbc-yugabytedb-42.3.5-yb-2.jar RUN cd /tmp; wget http://download.redis.io/redis-stable.tar.gz; tar xvzf redis-stable.tar.gz; cd redis-stable; make; cp src/redis-cli /usr/local/bin/; chmod 755 /usr/local/bin/redis-cli +RUN mkdir -p tpc-ds + +COPY ./connections.config ./tpc-ds/connections.config +COPY ./queries.config ./tpc-ds/queries.config +RUN ["chmod", "+x", "./tpc-ds/connections.config"] +RUN ["chmod", "+x", "./tpc-ds/queries.config"] + COPY ./benchmarker.sh ./benchmarker.sh RUN ["chmod", "+x", "./benchmarker.sh"] diff --git a/images/benchmarker_dbmsbenchmarker/benchmarker.sh b/images/benchmarker_dbmsbenchmarker/benchmarker.sh index 22ef6e525..16a89dce3 100644 --- a/images/benchmarker_dbmsbenchmarker/benchmarker.sh +++ b/images/benchmarker_dbmsbenchmarker/benchmarker.sh @@ -12,6 +12,17 @@ echo "BEXHOMA_EXPERIMENT_RUN:$BEXHOMA_EXPERIMENT_RUN" echo "BEXHOMA_CONFIGURATION:$BEXHOMA_CONFIGURATION" echo "BEXHOMA_CLIENT:$BEXHOMA_CLIENT" +######################## Test Run ######################## +# This runs TPC-DS against MonetDB at localhost +# usage: docker run --rm --network host -e DBMSBENCHMARKER_TESTRUN=1 bexhoma/benchmarker_dbmsbenchmarker:v0.14.6 +if test "$DBMSBENCHMARKER_TESTRUN" != "0" +then + #ls -lh ./jars/ + #python ./benchmark.py --help + python ./benchmark.py -f tpc-ds -e yes -b run + exit 0 +fi + ######################## Wait for synched starting time ######################## echo "benchmark started at $DBMSBENCHMARKER_NOW" echo "benchmark should wait until $DBMSBENCHMARKER_START" diff --git a/images/benchmarker_dbmsbenchmarker/connections.config b/images/benchmarker_dbmsbenchmarker/connections.config new file mode 100644 index 000000000..de07b46c2 --- /dev/null +++ b/images/benchmarker_dbmsbenchmarker/connections.config @@ -0,0 +1,17 @@ +[ + { + 'name': 'MonetDB', + 'info': 'This is a demo of MonetDB', + 'active': True, + 'dialect': 'MonetDB', + 'JDBC': { + #'driver': 'nl.cwi.monetdb.jdbc.MonetDriver', + 
'driver': 'org.monetdb.jdbc.MonetDriver', + 'url': 'jdbc:monetdb://localhost:50000/demo?schema=tpcds&so_timeout=900000', + 'auth': ['monetdb', 'monetdb'], + 'jar': 'jars/monetdb-jdbc-3.3.jre8.jar', + #'options': ["-Xms64m", "-Xmx64m"], + }, + 'init_SQL': "SET schema tpcds", + }, +] diff --git a/images/benchmarker_dbmsbenchmarker/queries.config b/images/benchmarker_dbmsbenchmarker/queries.config new file mode 100644 index 000000000..4f35ab217 --- /dev/null +++ b/images/benchmarker_dbmsbenchmarker/queries.config @@ -0,0 +1,12220 @@ +{ + 'name': "The TPC-DS Queries", + 'intro': "This includes the reading queries of TPC-DS.", + 'factor': 'mean', + 'connectionmanagement': { + 'timeout': 900, + #'numProcesses': 1, + #'runsPerConnection': 0, + #'singleConnection': True + }, + 'queries': + [ + { + 'title': "TPC-DS Q1", + 'query': """with customer_total_return as + (select sr_customer_sk as ctr_customer_sk + ,sr_store_sk as ctr_store_sk + ,sum({AGG_FIELD}) as ctr_total_return + from store_returns + ,date_dim + where sr_returned_date_sk = d_date_sk + and d_year ={YEAR} + group by sr_customer_sk + ,sr_store_sk) + select c_customer_id + from customer_total_return ctr1 + ,store + ,customer + where ctr1.ctr_total_return > (select avg(ctr_total_return)*1.2 + from customer_total_return ctr2 + where ctr1.ctr_store_sk = ctr2.ctr_store_sk) + and s_store_sk = ctr1.ctr_store_sk + and s_state = '{STATE}' + and ctr1.ctr_customer_sk = c_customer_sk + order by c_customer_id + limit 100""", + 'parameter': + { + 'AGG_FIELD': { + 'type': "list", + 'range': ["SR_RETURN_AMT","SR_FEE","SR_REFUNDED_CASH","SR_RETURN_AMT_INC_TAX","SR_REVERSED_CHARGE","SR_STORE_CREDIT","SR_RETURN_TAX"] + }, + 'STATE': { + 'type': "list", + 'range': ["AK","AL","AR","AZ","CA","CO","CT","DC","DE","FL","GA","HI","IA","ID","IL","IN","KS","KY","LA","MA","MD","ME","MI","MN","MO","MS","MT","NC","ND","NE","NH","NJ","NM","NV","NY","OH","OK","OR","PA","RI","SC","SD","TN","TX","UT","VA","VT","WA","WI","WV","WY"] + }, + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q2", + 'query': """with wscs as + (select sold_date_sk + ,sales_price + from (select ws_sold_date_sk sold_date_sk + ,ws_ext_sales_price sales_price + from web_sales + union all + select cs_sold_date_sk sold_date_sk + ,cs_ext_sales_price sales_price + from catalog_sales) x ), + wswscs as + (select d_week_seq, + sum(case when (d_day_name='Sunday') then sales_price else null end) sun_sales, + sum(case when (d_day_name='Monday') then sales_price else null end) mon_sales, + sum(case when (d_day_name='Tuesday') then sales_price else null end) tue_sales, + sum(case when (d_day_name='Wednesday') then sales_price else null end) wed_sales, + sum(case when (d_day_name='Thursday') then sales_price else null end) thu_sales, + sum(case when (d_day_name='Friday') then sales_price else null end) fri_sales, + sum(case when (d_day_name='Saturday') then sales_price else null end) sat_sales + from wscs + ,date_dim + where d_date_sk = sold_date_sk + group by d_week_seq) + select d_week_seq1 + ,round(sun_sales1/sun_sales2,2) sun_sales + ,round(mon_sales1/mon_sales2,2) mon_sales + ,round(tue_sales1/tue_sales2,2) tue_sales + ,round(wed_sales1/wed_sales2,2) wed_sales + ,round(thu_sales1/thu_sales2,2) thu_sales + 
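-- Q2: each weekday's sales of year {YEAR} are divided by the same week of
+ -- {YEAR}+1; d_week_seq1 = d_week_seq2 - 53 aligns the two years' week sequences.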
,round(fri_sales1/fri_sales2,2) fri_sales + ,round(sat_sales1/sat_sales2,2) sat_sales + from + (select wswscs.d_week_seq d_week_seq1 + ,sun_sales sun_sales1 + ,mon_sales mon_sales1 + ,tue_sales tue_sales1 + ,wed_sales wed_sales1 + ,thu_sales thu_sales1 + ,fri_sales fri_sales1 + ,sat_sales sat_sales1 + from wswscs,date_dim + where date_dim.d_week_seq = wswscs.d_week_seq and + d_year ={YEAR}) y, + (select wswscs.d_week_seq d_week_seq2 + ,sun_sales sun_sales2 + ,mon_sales mon_sales2 + ,tue_sales tue_sales2 + ,wed_sales wed_sales2 + ,thu_sales thu_sales2 + ,fri_sales fri_sales2 + ,sat_sales sat_sales2 + from wswscs + ,date_dim + where date_dim.d_week_seq = wswscs.d_week_seq and + d_year ={YEAR}+1) z + where d_week_seq1=d_week_seq2-53 + order by d_week_seq1""", + 'parameter': + { + 'YEAR': { + 'type': "integer", + 'range': [1998,2001] + } + , + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q3", + 'query': """select dt.d_year + ,item.i_brand_id AS brand_id + ,item.i_brand AS brand + ,sum({AGGC}) AS sum_agg + from date_dim dt + ,store_sales + ,item + where dt.d_date_sk = store_sales.ss_sold_date_sk + and store_sales.ss_item_sk = item.i_item_sk + and item.i_manufact_id = {MANUFACT} + and dt.d_moy={MONTH} + group by dt.d_year + ,brand + ,brand_id + order by dt.d_year + ,sum_agg desc + ,brand_id + limit 100""", + 'parameter': + { + 'MONTH': { + 'type': "integer", + 'range': [11,12] + }, + 'MANUFACT': { + 'type': "integer", + 'range': [1,1000] + }, + 'AGGC': { + 'type': "list", + 'range': ["ss_ext_sales_price","ss_sales_price","ss_ext_discount_amt","ss_net_profit"] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q4", + 'query': """with year_total as ( + select c_customer_id customer_id + ,c_first_name customer_first_name + ,c_last_name customer_last_name + ,c_preferred_cust_flag customer_preferred_cust_flag + ,c_birth_country customer_birth_country + ,c_login customer_login + ,c_email_address customer_email_address + ,d_year dyear + ,sum(((ss_ext_list_price-ss_ext_wholesale_cost-ss_ext_discount_amt)+ss_ext_sales_price)/2) year_total + ,'s' sale_type + from customer + ,store_sales + ,date_dim + where c_customer_sk = ss_customer_sk + and ss_sold_date_sk = d_date_sk + group by c_customer_id + ,c_first_name + ,c_last_name + ,c_preferred_cust_flag + ,c_birth_country + ,c_login + ,c_email_address + ,d_year + union all + select c_customer_id customer_id + ,c_first_name customer_first_name + ,c_last_name customer_last_name + ,c_preferred_cust_flag customer_preferred_cust_flag + ,c_birth_country customer_birth_country + ,c_login customer_login + ,c_email_address customer_email_address + ,d_year dyear + ,sum((((cs_ext_list_price-cs_ext_wholesale_cost-cs_ext_discount_amt)+cs_ext_sales_price)/2) ) year_total + ,'c' sale_type + from customer + ,catalog_sales + ,date_dim + where c_customer_sk = cs_bill_customer_sk + and cs_sold_date_sk = d_date_sk + group by c_customer_id + ,c_first_name + ,c_last_name + ,c_preferred_cust_flag + ,c_birth_country + ,c_login + ,c_email_address + ,d_year + union all + select c_customer_id customer_id + ,c_first_name
customer_first_name + ,c_last_name customer_last_name + ,c_preferred_cust_flag customer_preferred_cust_flag + ,c_birth_country customer_birth_country + ,c_login customer_login + ,c_email_address customer_email_address + ,d_year dyear + ,sum((((ws_ext_list_price-ws_ext_wholesale_cost-ws_ext_discount_amt)+ws_ext_sales_price)/2) ) year_total + ,'w' sale_type + from customer + ,web_sales + ,date_dim + where c_customer_sk = ws_bill_customer_sk + and ws_sold_date_sk = d_date_sk + group by c_customer_id + ,c_first_name + ,c_last_name + ,c_preferred_cust_flag + ,c_birth_country + ,c_login + ,c_email_address + ,d_year + ) + select + t_s_secyear.customer_id + ,t_s_secyear.customer_first_name + ,t_s_secyear.customer_last_name + ,{SELECTONE} + from year_total t_s_firstyear + ,year_total t_s_secyear + ,year_total t_c_firstyear + ,year_total t_c_secyear + ,year_total t_w_firstyear + ,year_total t_w_secyear + where t_s_secyear.customer_id = t_s_firstyear.customer_id + and t_s_firstyear.customer_id = t_c_secyear.customer_id + and t_s_firstyear.customer_id = t_c_firstyear.customer_id + and t_s_firstyear.customer_id = t_w_firstyear.customer_id + and t_s_firstyear.customer_id = t_w_secyear.customer_id + and t_s_firstyear.sale_type = 's' + and t_c_firstyear.sale_type = 'c' + and t_w_firstyear.sale_type = 'w' + and t_s_secyear.sale_type = 's' + and t_c_secyear.sale_type = 'c' + and t_w_secyear.sale_type = 'w' + and t_s_firstyear.dyear = {YEAR} + and t_s_secyear.dyear = {YEAR}+1 + and t_c_firstyear.dyear = {YEAR} + and t_c_secyear.dyear = {YEAR}+1 + and t_w_firstyear.dyear = {YEAR} + and t_w_secyear.dyear = {YEAR}+1 + and t_s_firstyear.year_total > 0 + and t_c_firstyear.year_total > 0 + and t_w_firstyear.year_total > 0 + and case when t_c_firstyear.year_total > 0 then t_c_secyear.year_total / t_c_firstyear.year_total else null end + > case when t_s_firstyear.year_total > 0 then t_s_secyear.year_total / t_s_firstyear.year_total else null end + and case when t_c_firstyear.year_total > 0 then t_c_secyear.year_total / t_c_firstyear.year_total else null end + > case when t_w_firstyear.year_total > 0 then t_w_secyear.year_total / t_w_firstyear.year_total else null end + order by t_s_secyear.customer_id + ,t_s_secyear.customer_first_name + ,t_s_secyear.customer_last_name + ,{SELECTONE} + limit 100""", + 'parameter': + { + 'YEAR': { + 'type': "integer", + 'range': [1998,2001] + }, + 'SELECTONE': { + 'type': "list", + 'range': ["t_s_secyear.customer_preferred_cust_flag","t_s_secyear.customer_birth_country","t_s_secyear.customer_login","t_s_secyear.customer_email_address"] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q5", + 'query': """ with ssr as + (select s_store_id, + sum(sales_price) as sales, + sum(profit) as profit, + sum(return_amt) as returns_angepasst, + sum(net_loss) as profit_loss + from + ( select ss_store_sk as store_sk, + ss_sold_date_sk as date_sk, + ss_ext_sales_price as sales_price, + ss_net_profit as profit, + cast(0 as decimal(7,2)) as return_amt, + cast(0 as decimal(7,2)) as net_loss + from store_sales + union all + select sr_store_sk as store_sk, + sr_returned_date_sk as date_sk, + cast(0 as decimal(7,2)) as sales_price, + cast(0 as decimal(7,2)) as profit, + sr_return_amt as return_amt, + sr_net_loss as net_loss + from store_returns + ) salesreturns, + 
date_dim, + store + where date_sk = d_date_sk + and d_date between cast('{YEAR}-{MONTH}-{DAY}' as date) + and (cast('{YEAR}-{MONTH}-{DAY}' as date) + interval '14' day) + and store_sk = s_store_sk + group by s_store_id) + , + csr as + (select cp_catalog_page_id, + sum(sales_price) as sales, + sum(profit) as profit, + sum(return_amt) as returns_angepasst, + sum(net_loss) as profit_loss + from + ( select cs_catalog_page_sk as page_sk, + cs_sold_date_sk as date_sk, + cs_ext_sales_price as sales_price, + cs_net_profit as profit, + cast(0 as decimal(7,2)) as return_amt, + cast(0 as decimal(7,2)) as net_loss + from catalog_sales + union all + select cr_catalog_page_sk as page_sk, + cr_returned_date_sk as date_sk, + cast(0 as decimal(7,2)) as sales_price, + cast(0 as decimal(7,2)) as profit, + cr_return_amount as return_amt, + cr_net_loss as net_loss + from catalog_returns + ) salesreturns, + date_dim, + catalog_page + where date_sk = d_date_sk + and d_date between cast('{YEAR}-{MONTH}-{DAY}' as date) + and (cast('{YEAR}-{MONTH}-{DAY}' as date) + interval '14' day) + and page_sk = cp_catalog_page_sk + group by cp_catalog_page_id) + , + wsr as + (select web_site_id, + sum(sales_price) as sales, + sum(profit) as profit, + sum(return_amt) as returns_angepasst, + sum(net_loss) as profit_loss + from + ( select ws_web_site_sk as wsr_web_site_sk, + ws_sold_date_sk as date_sk, + ws_ext_sales_price as sales_price, + ws_net_profit as profit, + cast(0 as decimal(7,2)) as return_amt, + cast(0 as decimal(7,2)) as net_loss + from web_sales + union all + select ws_web_site_sk as wsr_web_site_sk, + wr_returned_date_sk as date_sk, + cast(0 as decimal(7,2)) as sales_price, + cast(0 as decimal(7,2)) as profit, + wr_return_amt as return_amt, + wr_net_loss as net_loss + from web_returns left outer join web_sales on + ( wr_item_sk = ws_item_sk + and wr_order_number = ws_order_number) + ) salesreturns, + date_dim, + web_site + where date_sk = d_date_sk + and d_date between cast('{YEAR}-{MONTH}-{DAY}' as date) + and (cast('{YEAR}-{MONTH}-{DAY}' as date) + interval '14' day) + and wsr_web_site_sk = web_site_sk + group by web_site_id) + select channel + , id + , sum(sales) as sales + , sum(returns_angepasst) as returns_angepasst + , sum(profit) as profit + from + (select 'store channel' as channel + , concat('store', s_store_id) as id + , sales + , returns_angepasst + , (profit - profit_loss) as profit + from ssr + union all + select 'catalog channel' as channel + , concat('catalog_page', cp_catalog_page_id) as id + , sales + , returns_angepasst + , (profit - profit_loss) as profit + from csr + union all + select 'web channel' as channel + , concat('web_site', web_site_id) as id + , sales + , returns_angepasst + , (profit - profit_loss) as profit + from wsr + ) x + group by channel, id with rollup + order by channel is not null, channel + ,id is not null, id + limit 100""", + 'DBMS': { + 'MariaDB': """with total as ( + with ssr as + (select s_store_id, + sum(sales_price) as sales, + sum(profit) as profit, + sum(return_amt) as returns_angepasst, + sum(net_loss) as profit_loss + from + ( select ss_store_sk as store_sk, + ss_sold_date_sk as date_sk, + ss_ext_sales_price as sales_price, + ss_net_profit as profit, + cast(0 as decimal(7,2)) as return_amt, + cast(0 as decimal(7,2)) as net_loss + from store_sales + union all + select sr_store_sk as store_sk, + sr_returned_date_sk as date_sk, + cast(0 as decimal(7,2)) as sales_price, + cast(0 as decimal(7,2)) as profit, + sr_return_amt as return_amt, + sr_net_loss as net_loss + 
from store_returns + ) salesreturns, + date_dim, + store + where date_sk = d_date_sk + and d_date between cast('{YEAR}-{MONTH}-{DAY}' as date) + and (cast('{YEAR}-{MONTH}-{DAY}' as date) + interval '14' day) + and store_sk = s_store_sk + group by s_store_id) + , + csr as + (select cp_catalog_page_id, + sum(sales_price) as sales, + sum(profit) as profit, + sum(return_amt) as returns_angepasst, + sum(net_loss) as profit_loss + from + ( select cs_catalog_page_sk as page_sk, + cs_sold_date_sk as date_sk, + cs_ext_sales_price as sales_price, + cs_net_profit as profit, + cast(0 as decimal(7,2)) as return_amt, + cast(0 as decimal(7,2)) as net_loss + from catalog_sales + union all + select cr_catalog_page_sk as page_sk, + cr_returned_date_sk as date_sk, + cast(0 as decimal(7,2)) as sales_price, + cast(0 as decimal(7,2)) as profit, + cr_return_amount as return_amt, + cr_net_loss as net_loss + from catalog_returns + ) salesreturns, + date_dim, + catalog_page + where date_sk = d_date_sk + and d_date between cast('{YEAR}-{MONTH}-{DAY}' as date) + and (cast('{YEAR}-{MONTH}-{DAY}' as date) + interval '14' day) + and page_sk = cp_catalog_page_sk + group by cp_catalog_page_id) + , + wsr as + (select web_site_id, + sum(sales_price) as sales, + sum(profit) as profit, + sum(return_amt) as returns_angepasst, + sum(net_loss) as profit_loss + from + ( select ws_web_site_sk as wsr_web_site_sk, + ws_sold_date_sk as date_sk, + ws_ext_sales_price as sales_price, + ws_net_profit as profit, + cast(0 as decimal(7,2)) as return_amt, + cast(0 as decimal(7,2)) as net_loss + from web_sales + union all + select ws_web_site_sk as wsr_web_site_sk, + wr_returned_date_sk as date_sk, + cast(0 as decimal(7,2)) as sales_price, + cast(0 as decimal(7,2)) as profit, + wr_return_amt as return_amt, + wr_net_loss as net_loss + from web_returns left outer join web_sales on + ( wr_item_sk = ws_item_sk + and wr_order_number = ws_order_number) + ) salesreturns, + date_dim, + web_site + where date_sk = d_date_sk + and d_date between cast('{YEAR}-{MONTH}-{DAY}' as date) + and (cast('{YEAR}-{MONTH}-{DAY}' as date) + interval '14' day) + and wsr_web_site_sk = web_site_sk + group by web_site_id) + select channel + , id + , sum(sales) as sales + , sum(returns_angepasst) as returns_angepasst + , sum(profit) as profit + from + (select 'store channel' as channel + , concat('store', s_store_id) as id + , sales + , returns_angepasst + , (profit - profit_loss) as profit + from ssr + union all + select 'catalog channel' as channel + , concat('catalog_page', cp_catalog_page_id) as id + , sales + , returns_angepasst + , (profit - profit_loss) as profit + from csr + union all + select 'web channel' as channel + , concat('web_site', web_site_id) as id + , sales + , returns_angepasst + , (profit - profit_loss) as profit + from wsr + ) x + group by channel, id with rollup + ) + select * from total + order by channel is not null, channel + ,id is not null, id + limit 100""", + 'MonetDB': """ with ssr as + (select s_store_id, + sum(sales_price) as sales, + sum(profit) as profit, + sum(return_amt) as returns_angepasst, + sum(net_loss) as profit_loss + from + ( select ss_store_sk as store_sk, + ss_sold_date_sk as date_sk, + ss_ext_sales_price as sales_price, + ss_net_profit as profit, + cast(0 as decimal(7,2)) as return_amt, + cast(0 as decimal(7,2)) as net_loss + from store_sales + union all + select sr_store_sk as store_sk, + sr_returned_date_sk as date_sk, + cast(0 as decimal(7,2)) as sales_price, + cast(0 as decimal(7,2)) as profit, + sr_return_amt as 
return_amt, + sr_net_loss as net_loss + from store_returns + ) salesreturns, + date_dim, + store + where date_sk = d_date_sk + and d_date between cast('{YEAR}-{MONTH}-{DAY}' as date) + and (cast('{YEAR}-{MONTH}-{DAY}' as date) + interval '14' day) + and store_sk = s_store_sk + group by s_store_id) + , + csr as + (select cp_catalog_page_id, + sum(sales_price) as sales, + sum(profit) as profit, + sum(return_amt) as returns_angepasst, + sum(net_loss) as profit_loss + from + ( select cs_catalog_page_sk as page_sk, + cs_sold_date_sk as date_sk, + cs_ext_sales_price as sales_price, + cs_net_profit as profit, + cast(0 as decimal(7,2)) as return_amt, + cast(0 as decimal(7,2)) as net_loss + from catalog_sales + union all + select cr_catalog_page_sk as page_sk, + cr_returned_date_sk as date_sk, + cast(0 as decimal(7,2)) as sales_price, + cast(0 as decimal(7,2)) as profit, + cr_return_amount as return_amt, + cr_net_loss as net_loss + from catalog_returns + ) salesreturns, + date_dim, + catalog_page + where date_sk = d_date_sk + and d_date between cast('{YEAR}-{MONTH}-{DAY}' as date) + and (cast('{YEAR}-{MONTH}-{DAY}' as date) + interval '14' day) + and page_sk = cp_catalog_page_sk + group by cp_catalog_page_id) + , + wsr as + (select web_site_id, + sum(sales_price) as sales, + sum(profit) as profit, + sum(return_amt) as returns_angepasst, + sum(net_loss) as profit_loss + from + ( select ws_web_site_sk as wsr_web_site_sk, + ws_sold_date_sk as date_sk, + ws_ext_sales_price as sales_price, + ws_net_profit as profit, + cast(0 as decimal(7,2)) as return_amt, + cast(0 as decimal(7,2)) as net_loss + from web_sales + union all + select ws_web_site_sk as wsr_web_site_sk, + wr_returned_date_sk as date_sk, + cast(0 as decimal(7,2)) as sales_price, + cast(0 as decimal(7,2)) as profit, + wr_return_amt as return_amt, + wr_net_loss as net_loss + from web_returns left outer join web_sales on + ( wr_item_sk = ws_item_sk + and wr_order_number = ws_order_number) + ) salesreturns, + date_dim, + web_site + where date_sk = d_date_sk + and d_date between cast('{YEAR}-{MONTH}-{DAY}' as date) + and (cast('{YEAR}-{MONTH}-{DAY}' as date) + interval '14' day) + and wsr_web_site_sk = web_site_sk + group by web_site_id) + select channel + , id + , sum(sales) as sales + , sum(returns_angepasst) as returns_angepasst + , sum(profit) as profit + from + (select 'store channel' as channel + , 'store' || s_store_id as id + , sales + , returns_angepasst + , (profit - profit_loss) as profit + from ssr + union all + select 'catalog channel' as channel + , 'catalog_page' || cp_catalog_page_id as id + , sales + , returns_angepasst + , (profit - profit_loss) as profit + from csr + union all + select 'web channel' as channel + , 'web_site' || web_site_id as id + , sales + , returns_angepasst + , (profit - profit_loss) as profit + from wsr + ) x + group by rollup(channel, id) + order by channel + ,id + limit 100""", + 'PostgreSQL': """ with ssr as + (select s_store_id, + sum(sales_price) as sales, + sum(profit) as profit, + sum(return_amt) as returns_angepasst, + sum(net_loss) as profit_loss + from + ( select ss_store_sk as store_sk, + ss_sold_date_sk as date_sk, + ss_ext_sales_price as sales_price, + ss_net_profit as profit, + cast(0 as decimal(7,2)) as return_amt, + cast(0 as decimal(7,2)) as net_loss + from store_sales + union all + select sr_store_sk as store_sk, + sr_returned_date_sk as date_sk, + cast(0 as decimal(7,2)) as sales_price, + cast(0 as decimal(7,2)) as profit, + sr_return_amt as return_amt, + sr_net_loss as net_loss + from 
store_returns + ) salesreturns, + date_dim, + store + where date_sk = d_date_sk + and d_date between cast('{YEAR}-{MONTH}-{DAY}' as date) + and (cast('{YEAR}-{MONTH}-{DAY}' as date) + interval '14' day) + and store_sk = s_store_sk + group by s_store_id) + , + csr as + (select cp_catalog_page_id, + sum(sales_price) as sales, + sum(profit) as profit, + sum(return_amt) as returns_angepasst, + sum(net_loss) as profit_loss + from + ( select cs_catalog_page_sk as page_sk, + cs_sold_date_sk as date_sk, + cs_ext_sales_price as sales_price, + cs_net_profit as profit, + cast(0 as decimal(7,2)) as return_amt, + cast(0 as decimal(7,2)) as net_loss + from catalog_sales + union all + select cr_catalog_page_sk as page_sk, + cr_returned_date_sk as date_sk, + cast(0 as decimal(7,2)) as sales_price, + cast(0 as decimal(7,2)) as profit, + cr_return_amount as return_amt, + cr_net_loss as net_loss + from catalog_returns + ) salesreturns, + date_dim, + catalog_page + where date_sk = d_date_sk + and d_date between cast('{YEAR}-{MONTH}-{DAY}' as date) + and (cast('{YEAR}-{MONTH}-{DAY}' as date) + interval '14' day) + and page_sk = cp_catalog_page_sk + group by cp_catalog_page_id) + , + wsr as + (select web_site_id, + sum(sales_price) as sales, + sum(profit) as profit, + sum(return_amt) as returns_angepasst, + sum(net_loss) as profit_loss + from + ( select ws_web_site_sk as wsr_web_site_sk, + ws_sold_date_sk as date_sk, + ws_ext_sales_price as sales_price, + ws_net_profit as profit, + cast(0 as decimal(7,2)) as return_amt, + cast(0 as decimal(7,2)) as net_loss + from web_sales + union all + select ws_web_site_sk as wsr_web_site_sk, + wr_returned_date_sk as date_sk, + cast(0 as decimal(7,2)) as sales_price, + cast(0 as decimal(7,2)) as profit, + wr_return_amt as return_amt, + wr_net_loss as net_loss + from web_returns left outer join web_sales on + ( wr_item_sk = ws_item_sk + and wr_order_number = ws_order_number) + ) salesreturns, + date_dim, + web_site + where date_sk = d_date_sk + and d_date between cast('{YEAR}-{MONTH}-{DAY}' as date) + and (cast('{YEAR}-{MONTH}-{DAY}' as date) + interval '14' day) + and wsr_web_site_sk = web_site_sk + group by web_site_id) + select channel + , id + , sum(sales) as sales + , sum(returns_angepasst) as returns_angepasst + , sum(profit) as profit + from + (select 'store channel' as channel + , 'store' || s_store_id as id + , sales + , returns_angepasst + , (profit - profit_loss) as profit + from ssr + union all + select 'catalog channel' as channel + , 'catalog_page' || cp_catalog_page_id as id + , sales + , returns_angepasst + , (profit - profit_loss) as profit + from csr + union all + select 'web channel' as channel + , 'web_site' || web_site_id as id + , sales + , returns_angepasst + , (profit - profit_loss) as profit + from wsr + ) x + group by rollup(channel, id) + order by channel is not null, channel + ,id is not null, id + limit 100""", + 'Exasol': """ with ssr as + (select s_store_id, + sum(sales_price) as sales, + sum(profit) as profit, + sum(return_amt) as returns_angepasst, + sum(net_loss) as profit_loss + from + ( select ss_store_sk as store_sk, + ss_sold_date_sk as date_sk, + ss_ext_sales_price as sales_price, + ss_net_profit as profit, + cast(0 as decimal(7,2)) as return_amt, + cast(0 as decimal(7,2)) as net_loss + from store_sales + union all + select sr_store_sk as store_sk, + sr_returned_date_sk as date_sk, + cast(0 as decimal(7,2)) as sales_price, + cast(0 as decimal(7,2)) as profit, + sr_return_amt as return_amt, + sr_net_loss as net_loss + from store_returns + 
) salesreturns, + date_dim, + store + where date_sk = d_date_sk + and d_date between cast('{YEAR}-{MONTH}-{DAY}' as date) + and (cast('{YEAR}-{MONTH}-{DAY}' as date) + interval '14' day) + and store_sk = s_store_sk + group by s_store_id) + , + csr as + (select cp_catalog_page_id, + sum(sales_price) as sales, + sum(profit) as profit, + sum(return_amt) as returns_angepasst, + sum(net_loss) as profit_loss + from + ( select cs_catalog_page_sk as page_sk, + cs_sold_date_sk as date_sk, + cs_ext_sales_price as sales_price, + cs_net_profit as profit, + cast(0 as decimal(7,2)) as return_amt, + cast(0 as decimal(7,2)) as net_loss + from catalog_sales + union all + select cr_catalog_page_sk as page_sk, + cr_returned_date_sk as date_sk, + cast(0 as decimal(7,2)) as sales_price, + cast(0 as decimal(7,2)) as profit, + cr_return_amount as return_amt, + cr_net_loss as net_loss + from catalog_returns + ) salesreturns, + date_dim, + catalog_page + where date_sk = d_date_sk + and d_date between cast('{YEAR}-{MONTH}-{DAY}' as date) + and (cast('{YEAR}-{MONTH}-{DAY}' as date) + interval '14' day) + and page_sk = cp_catalog_page_sk + group by cp_catalog_page_id) + , + wsr as + (select web_site_id, + sum(sales_price) as sales, + sum(profit) as profit, + sum(return_amt) as returns_angepasst, + sum(net_loss) as profit_loss + from + ( select ws_web_site_sk as wsr_web_site_sk, + ws_sold_date_sk as date_sk, + ws_ext_sales_price as sales_price, + ws_net_profit as profit, + cast(0 as decimal(7,2)) as return_amt, + cast(0 as decimal(7,2)) as net_loss + from web_sales + union all + select ws_web_site_sk as wsr_web_site_sk, + wr_returned_date_sk as date_sk, + cast(0 as decimal(7,2)) as sales_price, + cast(0 as decimal(7,2)) as profit, + wr_return_amt as return_amt, + wr_net_loss as net_loss + from web_returns left outer join web_sales on + ( wr_item_sk = ws_item_sk + and wr_order_number = ws_order_number) + ) salesreturns, + date_dim, + web_site + where date_sk = d_date_sk + and d_date between cast('{YEAR}-{MONTH}-{DAY}' as date) + and (cast('{YEAR}-{MONTH}-{DAY}' as date) + interval '14' day) + and wsr_web_site_sk = web_site_sk + group by web_site_id) + select channel + , id + , sum(sales) as sales + , sum(returns_angepasst) as returns_angepasst + , sum(profit) as profit + from + (select 'store channel' as channel + , 'store' || s_store_id as id + , sales + , returns_angepasst + , (profit - profit_loss) as profit + from ssr + union all + select 'catalog channel' as channel + , 'catalog_page' || cp_catalog_page_id as id + , sales + , returns_angepasst + , (profit - profit_loss) as profit + from csr + union all + select 'web channel' as channel + , 'web_site' || web_site_id as id + , sales + , returns_angepasst + , (profit - profit_loss) as profit + from wsr + ) x + group by rollup(channel, id) + order by channel + ,id + limit 100""", + 'MemSQL': """ with ssr as + (select s_store_id, + sum(sales_price) as sales, + sum(profit) as profit, + sum(return_amt) as returns_angepasst, + sum(net_loss) as profit_loss + from + ( select ss_store_sk as store_sk, + ss_sold_date_sk as date_sk, + ss_ext_sales_price as sales_price, + ss_net_profit as profit, + cast(0 as decimal(7,2)) as return_amt, + cast(0 as decimal(7,2)) as net_loss + from store_sales + union all + select sr_store_sk as store_sk, + sr_returned_date_sk as date_sk, + cast(0 as decimal(7,2)) as sales_price, + cast(0 as decimal(7,2)) as profit, + sr_return_amt as return_amt, + sr_net_loss as net_loss + from store_returns + ) salesreturns, + date_dim, + store + where date_sk 
= d_date_sk + and d_date between cast('{YEAR}-{MONTH}-{DAY}' as date) + and (cast('{YEAR}-{MONTH}-{DAY}' as date) + interval '14' day) + and store_sk = s_store_sk + group by s_store_id) + , + csr as + (select cp_catalog_page_id, + sum(sales_price) as sales, + sum(profit) as profit, + sum(return_amt) as returns_angepasst, + sum(net_loss) as profit_loss + from + ( select cs_catalog_page_sk as page_sk, + cs_sold_date_sk as date_sk, + cs_ext_sales_price as sales_price, + cs_net_profit as profit, + cast(0 as decimal(7,2)) as return_amt, + cast(0 as decimal(7,2)) as net_loss + from catalog_sales + union all + select cr_catalog_page_sk as page_sk, + cr_returned_date_sk as date_sk, + cast(0 as decimal(7,2)) as sales_price, + cast(0 as decimal(7,2)) as profit, + cr_return_amount as return_amt, + cr_net_loss as net_loss + from catalog_returns + ) salesreturns, + date_dim, + catalog_page + where date_sk = d_date_sk + and d_date between cast('{YEAR}-{MONTH}-{DAY}' as date) + and (cast('{YEAR}-{MONTH}-{DAY}' as date) + interval '14' day) + and page_sk = cp_catalog_page_sk + group by cp_catalog_page_id) + , + wsr as + (select web_site_id, + sum(sales_price) as sales, + sum(profit) as profit, + sum(return_amt) as returns_angepasst, + sum(net_loss) as profit_loss + from + ( select ws_web_site_sk as wsr_web_site_sk, + ws_sold_date_sk as date_sk, + ws_ext_sales_price as sales_price, + ws_net_profit as profit, + cast(0 as decimal(7,2)) as return_amt, + cast(0 as decimal(7,2)) as net_loss + from web_sales + union all + select ws_web_site_sk as wsr_web_site_sk, + wr_returned_date_sk as date_sk, + cast(0 as decimal(7,2)) as sales_price, + cast(0 as decimal(7,2)) as profit, + wr_return_amt as return_amt, + wr_net_loss as net_loss + from web_returns left outer join web_sales on + ( wr_item_sk = ws_item_sk + and wr_order_number = ws_order_number) + ) salesreturns, + date_dim, + web_site + where date_sk = d_date_sk + and d_date between cast('{YEAR}-{MONTH}-{DAY}' as date) + and (cast('{YEAR}-{MONTH}-{DAY}' as date) + interval '14' day) + and wsr_web_site_sk = web_site_sk + group by web_site_id) + select channel + , id + , sum(sales) as sales + , sum(returns_angepasst) as returns_angepasst + , sum(profit) as profit + from + (select 'store channel' as channel + , 'store' || s_store_id as id + , sales + , returns_angepasst + , (profit - profit_loss) as profit + from ssr + union all + select 'catalog channel' as channel + , 'catalog_page' || cp_catalog_page_id as id + , sales + , returns_angepasst + , (profit - profit_loss) as profit + from csr + union all + select 'web channel' as channel + , 'web_site' || web_site_id as id + , sales + , returns_angepasst + , (profit - profit_loss) as profit + from wsr + ) x + group by rollup(channel, id) + order by channel + ,id + limit 100""" + }, + 'parameter': + { + 'DAY': { + 'type': "integer", + 'range': [1,30] + }, + 'MONTH': { + 'type': "integer", + 'range': [8,8] + }, + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q6", + 'query': """select a.ca_state AS state, count(*) cnt + from customer_address a + ,customer c + ,store_sales s + ,date_dim d + ,item i + where a.ca_address_sk = c.c_current_addr_sk + and c.c_customer_sk = s.ss_customer_sk + and s.ss_sold_date_sk = d.d_date_sk + and 
s.ss_item_sk = i.i_item_sk + and d.d_month_seq = + (select distinct (d_month_seq) + from date_dim + where d_year = {YEAR} + and d_moy = {MONTH} ) + and i.i_current_price > 1.2 * + (select avg(j.i_current_price) + from item j + where j.i_category = i.i_category) + group by a.ca_state + having count(*) >= 10 + order by cnt, a.ca_state + limit 100""", + 'parameter': + { + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + }, + 'MONTH': { + 'type': "integer", + 'range': [1,7] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q7", + 'query': """ select i_item_id, + avg(ss_quantity) agg1, + avg(100.*ss_list_price)/100. agg2, + avg(ss_coupon_amt) agg3, + avg(ss_sales_price) agg4 + from store_sales, customer_demographics, date_dim, item, promotion + where ss_sold_date_sk = d_date_sk and + ss_item_sk = i_item_sk and + ss_cdemo_sk = cd_demo_sk and + ss_promo_sk = p_promo_sk and + cd_gender = '{GEN}' and + cd_marital_status = '{MS}' and + cd_education_status = '{ES}' and + (p_channel_email = 'N' or p_channel_event = 'N') and + d_year = {YEAR} + group by i_item_id + order by i_item_id + limit 100""", + 'parameter': + { + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + }, + 'MS': { + 'type': "list", + 'range': ["M","S","D","W","U"] + }, + 'GEN': { + 'type': "list", + 'range': ["M","F"] + }, + 'ES': { + 'type': "list", + 'range': ["Primary","Secondary","College","2 yr Degree","4 yr Degree", "Advanced Degree","Unknown"] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q8", + 'query': """ select s_store_name + ,sum(ss_net_profit) as sum_net_profit + from store_sales + ,date_dim + ,store, + (select ca_zip + from ( + SELECT substr(ca_zip,1,5) ca_zip + FROM customer_address + WHERE substr(ca_zip,1,5) IN ( + '{ZIP1}','{ZIP2}','{ZIP3}','{ZIP4}','{ZIP5}','{ZIP6}', + '{ZIP7}','{ZIP8}','{ZIP9}','{ZIP10}','{ZIP11}', + '{ZIP12}','{ZIP13}','{ZIP14}','{ZIP15}','{ZIP16}', + '{ZIP17}','{ZIP18}','{ZIP19}','{ZIP20}','{ZIP21}', + '{ZIP22}','{ZIP23}','{ZIP24}','{ZIP25}','{ZIP26}', + '{ZIP27}','{ZIP28}','{ZIP29}','{ZIP30}','{ZIP31}', + '{ZIP32}','{ZIP33}','{ZIP34}','{ZIP35}','{ZIP36}', + '{ZIP37}','{ZIP38}','{ZIP39}','{ZIP40}','{ZIP41}', + '{ZIP42}','{ZIP43}','{ZIP44}','{ZIP45}','{ZIP46}', + '{ZIP47}','{ZIP48}','{ZIP49}','{ZIP50}','{ZIP51}', + '{ZIP52}','{ZIP53}','{ZIP54}','{ZIP55}','{ZIP56}', + '{ZIP57}','{ZIP58}','{ZIP59}','{ZIP60}','{ZIP61}', + '{ZIP62}','{ZIP63}','{ZIP64}','{ZIP65}','{ZIP66}', + '{ZIP67}','{ZIP68}','{ZIP69}','{ZIP70}','{ZIP71}', + '{ZIP72}','{ZIP73}','{ZIP74}','{ZIP75}','{ZIP76}', + '{ZIP77}','{ZIP78}','{ZIP79}','{ZIP80}','{ZIP81}', + '{ZIP82}','{ZIP83}','{ZIP84}','{ZIP85}','{ZIP86}', + '{ZIP87}','{ZIP88}','{ZIP89}','{ZIP90}','{ZIP91}', + '{ZIP92}','{ZIP93}','{ZIP94}','{ZIP95}','{ZIP96}', + '{ZIP97}','{ZIP98}','{ZIP99}','{ZIP100}','{ZIP101}', + '{ZIP102}','{ZIP103}','{ZIP104}','{ZIP105}','{ZIP106}', + '{ZIP107}','{ZIP108}','{ZIP109}','{ZIP110}','{ZIP111}', + '{ZIP112}','{ZIP113}','{ZIP114}','{ZIP115}','{ZIP116}', + '{ZIP117}','{ZIP118}','{ZIP119}','{ZIP120}','{ZIP121}', + 
'{ZIP122}','{ZIP123}','{ZIP124}','{ZIP125}','{ZIP126}', + '{ZIP127}','{ZIP128}','{ZIP129}','{ZIP130}','{ZIP131}', + '{ZIP132}','{ZIP133}','{ZIP134}','{ZIP135}','{ZIP136}', + '{ZIP137}','{ZIP138}','{ZIP139}','{ZIP140}','{ZIP141}', + '{ZIP142}','{ZIP143}','{ZIP144}','{ZIP145}','{ZIP146}', + '{ZIP147}','{ZIP148}','{ZIP149}','{ZIP150}','{ZIP151}', + '{ZIP152}','{ZIP153}','{ZIP154}','{ZIP155}','{ZIP156}', + '{ZIP157}','{ZIP158}','{ZIP159}','{ZIP160}','{ZIP161}', + '{ZIP162}','{ZIP163}','{ZIP164}','{ZIP165}','{ZIP166}', + '{ZIP167}','{ZIP168}','{ZIP169}','{ZIP170}','{ZIP171}', + '{ZIP172}','{ZIP173}','{ZIP174}','{ZIP175}','{ZIP176}', + '{ZIP177}','{ZIP178}','{ZIP179}','{ZIP180}','{ZIP181}', + '{ZIP182}','{ZIP183}','{ZIP184}','{ZIP185}','{ZIP186}', + '{ZIP187}','{ZIP188}','{ZIP189}','{ZIP190}','{ZIP191}', + '{ZIP192}','{ZIP193}','{ZIP194}','{ZIP195}','{ZIP196}', + '{ZIP197}','{ZIP198}','{ZIP199}','{ZIP200}','{ZIP201}', + '{ZIP202}','{ZIP203}','{ZIP204}','{ZIP205}','{ZIP206}', + '{ZIP207}','{ZIP208}','{ZIP209}','{ZIP210}','{ZIP211}', + '{ZIP212}','{ZIP213}','{ZIP214}','{ZIP215}','{ZIP216}', + '{ZIP217}','{ZIP218}','{ZIP219}','{ZIP220}','{ZIP221}', + '{ZIP222}','{ZIP223}','{ZIP224}','{ZIP225}','{ZIP226}', + '{ZIP227}','{ZIP228}','{ZIP229}','{ZIP230}','{ZIP231}', + '{ZIP232}','{ZIP233}','{ZIP234}','{ZIP235}','{ZIP236}', + '{ZIP237}','{ZIP238}','{ZIP239}','{ZIP240}','{ZIP241}', + '{ZIP242}','{ZIP243}','{ZIP244}','{ZIP245}','{ZIP246}', + '{ZIP247}','{ZIP248}','{ZIP249}','{ZIP250}','{ZIP251}', + '{ZIP252}','{ZIP253}','{ZIP254}','{ZIP255}','{ZIP256}', + '{ZIP257}','{ZIP258}','{ZIP259}','{ZIP260}','{ZIP261}', + '{ZIP262}','{ZIP263}','{ZIP264}','{ZIP265}','{ZIP266}', + '{ZIP267}','{ZIP268}','{ZIP269}','{ZIP270}','{ZIP271}', + '{ZIP272}','{ZIP273}','{ZIP274}','{ZIP275}','{ZIP276}', + '{ZIP277}','{ZIP278}','{ZIP279}','{ZIP280}','{ZIP281}', + '{ZIP282}','{ZIP283}','{ZIP284}','{ZIP285}','{ZIP286}', + '{ZIP287}','{ZIP288}','{ZIP289}','{ZIP290}','{ZIP291}', + '{ZIP292}','{ZIP293}','{ZIP294}','{ZIP295}','{ZIP296}', + '{ZIP297}','{ZIP298}','{ZIP299}','{ZIP300}','{ZIP301}', + '{ZIP302}','{ZIP303}','{ZIP304}','{ZIP305}','{ZIP306}', + '{ZIP307}','{ZIP308}','{ZIP309}','{ZIP310}','{ZIP311}', + '{ZIP312}','{ZIP313}','{ZIP314}','{ZIP315}','{ZIP316}', + '{ZIP317}','{ZIP318}','{ZIP319}','{ZIP320}','{ZIP321}', + '{ZIP322}','{ZIP323}','{ZIP324}','{ZIP325}','{ZIP326}', + '{ZIP327}','{ZIP328}','{ZIP329}','{ZIP330}','{ZIP331}', + '{ZIP332}','{ZIP333}','{ZIP334}','{ZIP335}','{ZIP336}', + '{ZIP337}','{ZIP338}','{ZIP339}','{ZIP340}','{ZIP341}', + '{ZIP342}','{ZIP343}','{ZIP344}','{ZIP345}','{ZIP346}', + '{ZIP347}','{ZIP348}','{ZIP349}','{ZIP350}','{ZIP351}', + '{ZIP352}','{ZIP353}','{ZIP354}','{ZIP355}','{ZIP356}', + '{ZIP357}','{ZIP358}','{ZIP359}','{ZIP360}','{ZIP361}', + '{ZIP362}','{ZIP363}','{ZIP364}','{ZIP365}','{ZIP366}', + '{ZIP367}','{ZIP368}','{ZIP369}','{ZIP370}','{ZIP371}', + '{ZIP372}','{ZIP373}','{ZIP374}','{ZIP375}','{ZIP376}', + '{ZIP377}','{ZIP378}','{ZIP379}','{ZIP380}','{ZIP381}', + '{ZIP382}','{ZIP383}','{ZIP384}','{ZIP385}','{ZIP386}', + '{ZIP387}','{ZIP388}','{ZIP389}','{ZIP390}','{ZIP391}', + '{ZIP392}','{ZIP393}','{ZIP394}','{ZIP395}','{ZIP396}', + '{ZIP397}','{ZIP398}','{ZIP399}','{ZIP400}') + intersect + select ca_zip + from (SELECT substr(ca_zip,1,5) ca_zip,count(*) cnt + FROM customer_address, customer + WHERE ca_address_sk = c_current_addr_sk and + c_preferred_cust_flag='Y' + group by ca_zip + having count(*) > 10)A1)A2) V1 + where ss_store_sk = s_store_sk + and ss_sold_date_sk = d_date_sk + and d_qoy 
= {QOY} and d_year = {YEAR} + and (substr(s_zip,1,2) = substr(V1.ca_zip,1,2)) + group by s_store_name + order by s_store_name + limit 100""", + 'parameter': + { + 'ZIP': { + 'type': "integer", + 'size': 400, + 'range': [10000,99999] + }, + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + }, + 'QOY': { + 'type': "integer", + 'range': [1,2] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q9", + 'query': """select case when (select count(*) + from store_sales + where ss_quantity between 1 and 20) > {RC1} + then (select avg({AGGCTHEN}) + from store_sales + where ss_quantity between 1 and 20) + else (select avg({AGGCELSE}) + from store_sales + where ss_quantity between 1 and 20) end bucket1 , + case when (select count(*) + from store_sales + where ss_quantity between 21 and 40) > {RC2} + then (select avg({AGGCTHEN}) + from store_sales + where ss_quantity between 21 and 40) + else (select avg({AGGCELSE}) + from store_sales + where ss_quantity between 21 and 40) end bucket2, + case when (select count(*) + from store_sales + where ss_quantity between 41 and 60) > {RC3} + then (select avg({AGGCTHEN}) + from store_sales + where ss_quantity between 41 and 60) + else (select avg({AGGCELSE}) + from store_sales + where ss_quantity between 41 and 60) end bucket3, + case when (select count(*) + from store_sales + where ss_quantity between 61 and 80) > {RC4} + then (select avg({AGGCTHEN}) + from store_sales + where ss_quantity between 61 and 80) + else (select avg({AGGCELSE}) + from store_sales + where ss_quantity between 61 and 80) end bucket4, + case when (select count(*) + from store_sales + where ss_quantity between 81 and 100) > {RC5} + then (select avg({AGGCTHEN}) + from store_sales + where ss_quantity between 81 and 100) + else (select avg({AGGCELSE}) + from store_sales + where ss_quantity between 81 and 100) end bucket5 + from reason + where r_reason_sk = 1 + """, + 'parameter': + { + 'RC': { + 'type': "integer", + 'size': 5, + 'range': [1,567080] + }, + 'AGGCTHEN': { + 'type': "list", + 'range': ["ss_ext_discount_amt","ss_ext_sales_price","ss_ext_list_price","ss_ext_tax"] + }, + 'AGGCELSE': { + 'type': "list", + 'range': ["ss_net_paid","ss_net_paid_inc_tax","ss_net_profit"] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q10", + 'query': """select + cd_gender, + cd_marital_status, + cd_education_status, + count(*) cnt1, + cd_purchase_estimate, + count(*) cnt2, + cd_credit_rating, + count(*) cnt3, + cd_dep_count, + count(*) cnt4, + cd_dep_employed_count, + count(*) cnt5, + cd_dep_college_count, + count(*) cnt6 + from + customer c,customer_address ca,customer_demographics + where + c.c_current_addr_sk = ca.ca_address_sk and + ca_county in ('{COUNTY1}','{COUNTY2}','{COUNTY3}','{COUNTY4}','{COUNTY5}','{COUNTY6}','{COUNTY7}','{COUNTY8}','{COUNTY9}','{COUNTY10}') and + cd_demo_sk = c.c_current_cdemo_sk and + exists (select * + from store_sales,date_dim + where c.c_customer_sk = ss_customer_sk and + ss_sold_date_sk = d_date_sk and + d_year = {YEAR} and + d_moy between {MONTH} and {MONTH}+3) and + (exists (select * + from 
web_sales,date_dim + where c.c_customer_sk = ws_bill_customer_sk and + ws_sold_date_sk = d_date_sk and + d_year = {YEAR} and + d_moy between {MONTH} ANd {MONTH}+3) or + exists (select * + from catalog_sales,date_dim + where c.c_customer_sk = cs_ship_customer_sk and + cs_sold_date_sk = d_date_sk and + d_year = {YEAR} and + d_moy between {MONTH} and {MONTH}+3)) + group by cd_gender, + cd_marital_status, + cd_education_status, + cd_purchase_estimate, + cd_credit_rating, + cd_dep_count, + cd_dep_employed_count, + cd_dep_college_count + order by cd_gender, + cd_marital_status, + cd_education_status, + cd_purchase_estimate, + cd_credit_rating, + cd_dep_count, + cd_dep_employed_count, + cd_dep_college_count + limit 100""", + 'parameter': + { + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + }, + 'MONTH': { + 'type': "integer", + 'range': [1,4] + }, + 'COUNTY': { + 'type': "list", + 'size': 10, + 'range': ["Ziebach County","Zavala County","Zapata County","Yuma County","Yukon-Koyukuk Census Area","Yuba County","Young County","York County","Yolo County","Yoakum County","Yellowstone County","Yellow Medicine County","Yell County","Yazoo County","Yavapai County","Yates County","Yankton County","Yancey County","Yamhill County","Yalobusha County","Yakutat Borough","Yakima County","Yadkin County","Wythe County","Wyoming County","Wyandotte County","Wyandot County","Wright County","Worth County","Worcester County","Woodward County","Woodson County","Woods County","Woodruff County","Woodford County","Woodbury County","Wood County","Wolfe County","Wise County","Wirt County","Winston County","Winona County","Winneshiek County","Winnebago County","Winn Parish","Winkler County","Windsor County","Windham County","Winchester city","Wilson County","Williamson County","Williamsburg County","Williamsburg city","Williams County","Willacy County","Will County","Wilkinson County","Wilkin County","Wilkes County","Wilcox County","Wilbarger County","Wicomico County","Wichita County","Wibaux County","Whitman County","Whitley County","Whitfield County","Whiteside County","White Pine County","White County","Wheeler County","Wheatland County","Whatcom County","Wharton County","Wexford County","Wetzel County","Weston County","Westmoreland County","Westchester County","West Feliciana Parish","West Carroll Parish","West Baton Rouge Parish","Wells County","Weld County","Webster Parish","Webster County","Weber County","Webb County","Weakley County","Waynesboro city","Wayne County","Waushara County","Waupaca County","Waukesha County","Watonwan County","Watauga County","Washtenaw County","Washoe County","Washita County","Washington Parish","Washington County","Washburn County","Washakie County","Waseca County","Wasco County","Wasatch County","Warrick County","Warren County","Ware County","Ward County","Wapello County","Walworth County","Walton County","Walthall County","Walsh County","Wallowa County","Waller County","Wallace County","Walla Walla County","Walker County","Waldo County","Wakulla County","Wake County","Wahkiakum County","Wagoner County","Wadena County","Wade Hampton Census Area","Wabaunsee County","Wabasha County","Wabash County","Volusia County","Virginia Beach city","Vinton County","Vilas County","Vigo County","Victoria County","Vernon Parish","Vernon County","Vermillion County","Vermilion Parish","Vermilion County","Ventura County","Venango County","Vanderburgh County","Vance County","Van Zandt County","Van Wert County","Van Buren County","Valley County","Valencia County","Valdez-Cordova Census Area","Val 
Verde County","Uvalde County","Utah County","Upton County","Upson County","Upshur County","Union Parish","Union County","Unicoi County","Umatilla County","Ulster County","Uintah County","Uinta County","Tyrrell County","Tyler County","Twin Falls County","Twiggs County","Tuscola County","Tuscarawas County","Tuscaloosa County","Turner County","Tuolumne County","Tunica County","Tulsa County","Tulare County","Tucker County","Trumbull County","Trousdale County","Troup County","Tripp County","Trinity County","Trimble County","Trigg County","Treutlen County","Trempealeau County","Trego County","Treasure County","Travis County","Traverse County","Transylvania County","Traill County","Towns County","Towner County","Torrance County","Toombs County","Toole County","Tooele County","Tompkins County","Tom Green County","Tolland County","Todd County","Titus County","Tishomingo County","Tipton County","Tippecanoe County","Tippah County","Tioga County","Tillman County","Tillamook County","Tift County","Thurston County","Throckmorton County","Thomas County","Thayer County","Texas County","Teton County","Terry County","Terrell County","Terrebonne Parish","Tensas Parish","Teller County","Telfair County","Tehama County","Tazewell County","Taylor County","Tattnall County","Tate County","Tarrant County","Taos County","Tangipahoa Parish","Taney County","Tama County","Tallapoosa County","Tallahatchie County","Talladega County","Taliaferro County","Talbot County","Switzerland County","Swisher County","Swift County","Sweetwater County","Sweet Grass County","Swain County","Suwannee County","Sutton County","Sutter County","Sussex County","Susquehanna County","Surry County","Sunflower County","Sumter County","Sumner County","Summit County","Summers County","Sully County","Sullivan County","Suffolk County","Suffolk city","Sublette County","Stutsman County","Strafford County","Story County","Storey County","Stonewall County","Stone County","Stokes County","Stoddard County","Stillwater County","Stewart County","Stevens County","Steuben County","Sterling County","Stephenson County","Stephens County","Steele County","Stearns County","Staunton city","Starr County","Starke County","Stark County","Stanton County","Stanly County","Stanley County","Stanislaus County","Stafford County","Spotsylvania County","Spokane County","Spink County","Spencer County","Spartanburg County","Spalding County","Southampton County","Sonoma County","Somervell County","Somerset County","Solano County","Socorro County","Snyder County","Snohomish County","Smyth County","Smith County","Slope County","Skamania County","Skagit County","Sitka Borough","Siskiyou County","Sioux County","Simpson County","Silver Bow County","Sierra County","Sibley County","Shoshone County","Shiawassee County","Sherman County","Sheridan County","Sherburne County","Shenandoah County","Shelby County","Sheboygan County","Shawnee County","Shawano County","Shasta County","Sharp County","Sharkey County","Shannon County","Shackelford County","Seward County","Sevier County","Sequoyah County","Sequatchie County","Seneca County","Seminole County","Sedgwick County","Sebastian County","Searcy County","Scurry County","Screven County","Scotts Bluff County","Scott County","Scotland County","Scioto County","Schuylkill County","Schuyler County","Schoolcraft County","Schoharie County","Schley County","Schleicher County","Schenectady County","Sawyer County","Saunders County","Sauk County","Sarpy County","Sargent County","Saratoga County","Sarasota County","Santa Rosa County","Santa Fe 
County","Santa Cruz County","Santa Clara County","Santa Barbara County","Sanpete County","Sanilac County","Sangamon County","Sandusky County","Sandoval County","Sanders County","Sanborn County","San Saba County","San Patricio County","San Miguel County","San Mateo County","San Luis Obispo County","San Juan County","San Joaquin County","San Jacinto County","San Francisco County","San Diego County","San Bernardino County","San Benito County","San Augustine County","Sampson County","Saluda County","Salt Lake County","Saline County","Salem County","Salem city","Saguache County","Saginaw County","Sagadahoc County","Sacramento County","Sac County","Sabine Parish","Sabine County","Rutland County","Rutherford County","Russell County","Rusk County","Rush County","Runnels County","Rowan County","Routt County","Ross County","Rosebud County","Roseau County","Roscommon County","Roosevelt County","Rooks County","Rolette County","Rogers County","Roger Mills County","Rockwall County","Rockland County","Rockingham County","Rockdale County","Rockcastle County","Rockbridge County","Rock Island County","Rock County","Robeson County","Robertson County","Roberts County","Roanoke County","Roanoke city","Roane County","Riverside County","Ritchie County","Ripley County","Rio Grande County","Rio Blanco County","Rio Arriba County","Ringgold County","Riley County","Richmond County","Richmond city","Richland Parish","Richland County","Richardson County","Rich County","Rice County","Rhea County","Reynolds County","Republic County","Renville County","Rensselaer County","Reno County","Refugio County","Reeves County","Redwood County","Red Willow County","Red River Parish","Red River County","Red Lake County","Real County","Reagan County","Ray County","Rawlins County","Ravalli County","Rappahannock County","Rapides Parish","Ransom County","Rankin County","Randolph County","Randall County","Ramsey County","Ralls County","Raleigh County","Rains County","Radford city","Racine County","Rabun County","Quitman County","Queens County","Queen Anne County","Quay County","Putnam County","Pushmataha County","Pulaski County","Pueblo County","Prowers County","Providence County","Prince William County","Prince George County","Prince Edward County","Price County","Preston County","Presque Isle County","Presidio County","Prentiss County","Preble County","Pratt County","Prairie County","Powhatan County","Poweshiek County","Power County","Powell County","Powder River County","Potter County","Pottawattamie County","Pottawatomie County","Posey County","Portsmouth city","Porter County","Portage County","Poquoson city","Pope County","Pontotoc County","Pondera County","Polk County","Pointe Coupee Parish","Poinsett County","Pocahontas County","Plymouth County","Plumas County","Pleasants County","Platte County","Plaquemines Parish","Placer County","Piute County","Pittsylvania County","Pittsburg County","Pitt County","Pitkin County","Piscataquis County","Pipestone County","Pinellas County","Pine County","Pinal County","Pima County","Pike County","Pierce County","Pickett County","Pickens County","Pickaway County","Piatt County","Phillips County","Philadelphia County","Phelps County","Pettis County","Petroleum County","Petersburg city","Person County","Pershing County","Perry County","Perquimans County","Perkins County","Pepin County","Peoria County","Penobscot County","Pennington County","Pendleton County","Pender County","Pend Oreille County","Pemiscot County","Pembina County","Pecos County","Pearl River County","Peach County","Payne 
County","Payette County","Pawnee County","Paulding County","Patrick County","Passaic County","Pasquotank County","Pasco County","Parmer County","Parker County","Parke County","Park County","Panola County","Pamlico County","Palo Pinto County","Palo Alto County","Palm Beach County","Page County","Pacific County","Ozaukee County","Ozark County","Oxford County","Owyhee County","Owsley County","Owen County","Overton County","Outagamie County","Ouray County","Ouachita Parish","Ouachita County","Otter Tail County","Ottawa County","Otsego County","Otoe County","Otero County","Oswego County","Oscoda County","Osceola County","Osborne County","Osage County","Orleans Parish","Orleans County","Oregon County","Orangeburg County","Orange County","Ontonagon County","Ontario County","Onslow County","Onondaga County","Oneida County","Olmsted County","Oliver County","Oldham County","Oktibbeha County","Okmulgee County","Oklahoma County","Okfuskee County","Okeechobee County","Okanogan County","Okaloosa County","Ohio County","Oglethorpe County","Ogle County","Ogemaw County","Oconto County","Oconee County","Ochiltree County","Oceana County","Ocean County","Obion County","Oakland County","O-Brien County","Nye County","Nueces County","Nuckolls County","Noxubee County","Nowata County","Nottoway County","Norton County","Norton city","Northwest Arctic Borough","Northumberland County","Northampton County","North Slope Borough","Norman County","Norfolk County","Norfolk city","Nome Census Area","Nolan County","Nodaway County","Nobles County","Noble County","Niobrara County","Nicollet County","Nicholas County","Niagara County","Nez Perce County","Newton County","Newport News city","Newport County","Newberry County","Newaygo County","New York County","New Madrid County","New London County","New Kent County","New Haven County","New Hanover County","New Castle County","Nevada County","Ness County","Neshoba County","Neosho County","Nemaha County","Nelson County","Navarro County","Navajo County","Natrona County","Natchitoches Parish","Nassau County","Nash County","Napa County","Nantucket County","Nance County","Nacogdoches County","Musselshell County","Muskogee County","Muskingum County","Muskegon County","Muscogee County","Muscatine County","Murray County","Multnomah County","Muhlenberg County","Mower County","Mountrail County","Moultrie County","Motley County","Morton County","Morrow County","Morrison County","Morris County","Morrill County","Morgan County","Morehouse Parish","Mora County","Moore County","Moody County","Montrose County","Montour County","Montmorency County","Montgomery County","Montezuma County","Monterey County","Montcalm County","Montague County","Monroe County","Monongalia County","Monona County","Mono County","Monmouth County","Moniteau County","Mohave County","Moffat County","Modoc County","Mobile County","Mitchell County","Missoula County","Mississippi County","Missaukee County","Minnehaha County","Minidoka County","Mingo County","Mineral County","Miner County","Milwaukee County","Mills County","Miller County","Mille Lacs County","Millard County","Milam County","Mifflin County","Midland County","Middlesex County","Miami County","Metcalfe County","Mesa County","Merrimack County","Merrick County","Meriwether County","Mercer County","Merced County","Menominee County","Menifee County","Mendocino County","Menard County","Mellette County","Meigs County","Meeker County","Medina County","Mecosta County","Mecklenburg County","Meagher County","Meade County","McPherson County","McNairy County","McMullen 
County","McMinn County","McLeod County","McLennan County","McLean County","McKinley County","McKenzie County","McKean County","McIntosh County","McHenry County","McDuffie County","McDowell County","McDonough County","McDonald County","McCurtain County","McCulloch County","McCreary County","McCracken County","McCormick County","McCook County","McCone County","McClain County","Mayes County","Maverick County","Maury County","Maui County","Mathews County","Matanuska-Susitna Borough","Matagorda County","Massac County","Mason County","Martinsville city","Martin County","Marshall County","Marquette County","Marlboro County","Mariposa County","Marion County","Marinette County","Marin County","Maries County","Maricopa County","Marengo County","Marathon County","Manitowoc County","Manistee County","Manatee County","Manassas Park city","Manassas city","Malheur County","Major County","Mahoning County","Mahnomen County","Mahaska County","Magoffin County","Madison Parish","Madison County","Madera County","Macoupin County","Macon County","Macomb County","Mackinac County","Lyon County","Lynn County","Lynchburg city","Lyman County","Lycoming County","Luzerne County","Lunenburg County","Luna County","Lumpkin County","Luce County","Lucas County","Lubbock County","Lowndes County","Loving County","Love County","Loup County","Louisa County","Loudoun County","Loudon County","Los Angeles County","Los Alamos County","Lorain County","Lonoke County","Long County","Logan County","Llano County","Livingston Parish","Livingston County","Live Oak County","Little River County","Litchfield County","Lipscomb County","Linn County","Lincoln Parish","Lincoln County","Limestone County","Licking County","Liberty County","Lexington County","Lexington city","Lewis County","Lewis and Clark County","Levy County","Letcher County","Leslie County","Leon County","Lenoir County","Lenawee County","Lemhi County","Lehigh County","Leflore County","Leelanau County","Lee County","Lebanon County","Leavenworth County","Leake County","Lea County","Le Sueur County","Le Flore County","Lawrence County","Lavaca County","Laurens County","Laurel County","Lauderdale County","Latimer County","Latah County","Lassen County","Las Animas County","Larue County","Larimer County","Laramie County","Lapeer County","Lanier County","Langlade County","Lane County","Lander County","Lancaster County","Lampasas County","LaMoure County","Lamoille County","Lamb County","Lamar County","Lake of the Woods County","Lake County","Lake and Peninsula Borough","Lagrange County","Lafourche Parish","Lafayette Parish","Lafayette County","Laclede County","Lackawanna County","Lac qui Parle County","Labette County","La Salle Parish","La Salle County","La Porte County","La Plata County","La Paz County","La Crosse County","Kossuth County","Kosciusko County","Kootenai County","Koochiching County","Kodiak Island Borough","Knox County","Knott County","Klickitat County","Kleberg County","Klamath County","Kittson County","Kittitas County","Kitsap County","Kit Carson County","Kiowa County","Kinney County","Kingsbury County","Kings County","Kingman County","Kingfisher County","King William County","King George County","King County","King and Queen County","Kimble County","Kimball County","Kidder County","Keya Paha County","Keweenaw County","Kewaunee County","Ketchikan Gateway Borough","Kershaw County","Kerr County","Kern County","Keokuk County","Kenton County","Kent County","Kenosha County","Kennebec County","Kenedy County","Kendall County","Kenai Peninsula Borough","Kemper County","Keith 
County","Kearny County","Kearney County","Kay County","Kaufman County","Kauai County","Karnes County","Kankakee County","Kane County","Kandiyohi County","Kanawha County","Kanabec County","Kalkaska County","Kalamazoo County","Juniata County","Juneau County","Juneau Borough","Judith Basin County","Juab County","Josephine County","Jones County","Johnston County","Johnson County","Jo Daviess County","Jim Wells County","Jim Hogg County","Jewell County","Jessamine County","Jersey County","Jerome County","Jerauld County","Jennings County","Jenkins County","Jefferson Parish","Jefferson Davis Parish","Jefferson Davis County","Jefferson County","Jeff Davis County","Jay County","Jasper County","James City County","Jackson Parish","Jackson County","Jack County","Izard County","Itawamba County","Itasca County","Issaquena County","Isle of Wight County","Island County","Isanti County","Isabella County","Irwin County","Iroquois County","Iron County","Irion County","Iredell County","Iowa County","Iosco County","Ionia County","Inyo County","Ingham County","Indiana County","Indian River County","Independence County","Imperial County","Idaho County","Ida County","Iberville Parish","Iberia Parish","Hyde County","Hutchinson County","Huron County","Huntington County","Huntingdon County","Hunterdon County","Hunt County","Humphreys County","Humboldt County","Hughes County","Huerfano County","Hudspeth County","Hudson County","Hubbard County","Howell County","Howard County","Houston County","Houghton County","Hot Springs County","Hot Spring County","Horry County","Hopkins County","Hopewell city","Hooker County","Hood River County","Hood County","Honolulu County","Holt County","Holmes County","Hoke County","Hodgeman County","Hockley County","Hocking County","Hitchcock County","Hinsdale County","Hinds County","Hillsdale County","Hillsborough County","Hill County","Highlands County","Highland County","Hidalgo County","Hickory County","Hickman County","Hettinger County","Hertford County","Hernando County","Herkimer County","Henry County","Henrico County","Hennepin County","Hendry County","Hendricks County","Henderson County","Hempstead County","Hemphill County","Heard County","Haywood County","Hays County","Hayes County","Hawkins County","Hawaii County","Haskell County","Harvey County","Hartley County","Hartford County","Hart County","Harrisonburg city","Harrison County","Harris County","Harper County","Harney County","Harnett County","Harmon County","Harlan County","Harford County","Hardy County","Harding County","Hardin County","Hardeman County","Hardee County","Haralson County","Hanson County","Hansford County","Hanover County","Hand County","Hancock County","Hampton County","Hampton city","Hampshire County","Hampden County","Hamlin County","Hamilton County","Hamblen County","Hall County","Halifax County","Hale County","Haines Borough","Habersham County","Haakon County","Gwinnett County","Guthrie County","Gunnison County","Gulf County","Guilford County","Guernsey County","Guadalupe County","Grundy County","Grimes County","Griggs County","Grenada County","Gregory County","Gregg County","Greer County","Greenwood County","Greenville County","Greenup County","Greensville County","Greenlee County","Greene County","Greenbrier County","Green Lake County","Green County","Greeley County","Grayson County","Grays Harbor County","Gray County","Graves County","Gratiot County","Granville County","Grant Parish","Grant County","Granite County","Grand Traverse County","Grand Isle County","Grand Forks County","Grand County","Grainger 
County","Graham County","Grafton County","Grady County","Gove County","Gosper County","Goshen County","Gordon County","Gooding County","Goodhue County","Goochland County","Gonzales County","Goliad County","Golden Valley County","Gogebic County","Glynn County","Gloucester County","Glenn County","Glasscock County","Glascock County","Gladwin County","Glades County","Glacier County","Gilpin County","Gilmer County","Gilliam County","Gillespie County","Giles County","Gilchrist County","Gila County","Gibson County","Georgetown County","George County","Gentry County","Geneva County","Genesee County","Gem County","Geauga County","Geary County","Gates County","Gaston County","Gasconade County","Garza County","Garvin County","Garrett County","Garrard County","Garland County","Garfield County","Garden County","Galveston County","Gallia County","Gallatin County","Galax city","Gaines County","Gage County","Gadsden County","Furnas County","Fulton County","Frontier County","Frio County","Fresno County","Fremont County","Freestone County","Freeborn County","Fredericksburg city","Frederick County","Franklin Parish","Franklin County","Franklin city","Fountain County","Foster County","Fort Bend County","Forsyth County","Forrest County","Forest County","Ford County","Fond du Lac County","Foard County","Fluvanna County","Floyd County","Florence County","Fleming County","Flathead County","Flagler County","Fisher County","Finney County","Fillmore County","Ferry County","Fergus County","Fentress County","Fayette County","Fauquier County","Faulkner County","Faulk County","Faribault County","Fannin County","Falls County","Falls Church city","Fallon County","Fall River County","Fairfield County","Fairfax County","Fairfax city","Fairbanks North Star Borough","Evans County","Evangeline Parish","Eureka County","Etowah County","Estill County","Essex County","Esmeralda County","Escambia County","Erie County","Erath County","Emporia city","Emmons County","Emmet County","Emery County","Emanuel County","Elmore County","Ellsworth County","Ellis County","Elliott County","Elko County","Elkhart County","Elk County","Elbert County","El Paso County","El Dorado County","Effingham County","Edwards County","Edmunds County","Edmonson County","Edgefield County","Edgecombe County","Edgar County","Eddy County","Ector County","Echols County","Eau Claire County","Eaton County","Eastland County","East Feliciana Parish","East Carroll Parish","East Baton Rouge Parish","Early County","Eagle County","Dyer County","Duval County","Dutchess County","Durham County","Duplin County","DuPage County","Dunn County","Dunklin County","Dundy County","Dukes County","Duchesne County","Dubuque County","Dubois County","Drew County","Douglas County","Dougherty County","Dorchester County","Door County","Dooly County","Donley County","Doniphan County","Dona Ana County","Dolores County","Dodge County","Doddridge County","Dixon County","Dixie County","Divide County","District of Columbia","Dinwiddie County","Dimmit County","Dillon County","Dillingham Census Area","Dickson County","Dickinson County","Dickey County","Dickenson County","Dickens County","DeWitt County","Dewey County","Deuel County","DeSoto County","Desha County","Deschutes County","Des Moines County","Denver County","Denton County","Dent County","Denali Borough","Delta County","Delaware County","Del Norte County","DeKalb County","Defiance County","Deer Lodge County","Decatur County","DeBaca County","Dearborn County","Deaf Smith County","De Witt County","De Soto Parish","De Kalb County","Day 
County","Dawson County","Dawes County","Davison County","Davis County","Daviess County","Davie County","Davidson County","Dauphin County","Darlington County","Darke County","Dare County","Danville city","Daniels County","Dane County","Dallas County","Dallam County","Dale County","Dakota County","Daggett County","Dade County","Cuyahoga County","Custer County","Curry County","Currituck County","Cuming County","Cumberland County","Culpeper County","Cullman County","Culberson County","Crowley County","Crow Wing County","Cross County","Crosby County","Crook County","Crockett County","Crittenden County","Crisp County","Crenshaw County","Creek County","Crawford County","Craven County","Crane County","Craighead County","Craig County","Cowlitz County","Cowley County","Coweta County","Covington County","Covington city","Cottonwood County","Cotton County","Cottle County","Costilla County","Coshocton County","Coryell County","Cortland County","Corson County","Copiah County","Coosa County","Coos County","Cooper County","Cooke County","Cook County","Conway County","Converse County","Contra Costa County","Conejos County","Conecuh County","Concordia Parish","Concho County","Comanche County","Comal County","Colusa County","Columbus County","Columbiana County","Columbia County","Colquitt County","Colorado County","Colonial Heights city","Collingsworth County","Collin County","Collier County","Colleton County","Colfax County","Coles County","Coleman County","Cole County","Colbert County","Coke County","Coffey County","Coffee County","Codington County","Coconino County","Cocke County","Cochran County","Cochise County","Cobb County","Coal County","Coahoma County","Cloud County","Clinton County","Clinch County","Clifton Forge city","Cleveland County","Clermont County","Cleburne County","Clearwater County","Clearfield County","Clear Creek County","Clayton County","Clay County","Clatsop County","Clarke County","Clark County","Clarion County","Clarendon County","Clare County","Clallam County","Claiborne Parish","Claiborne County","Clackamas County","Citrus County","Cimarron County","Cibola County","Churchill County","Christian County","Chowan County","Chouteau County","Choctaw County","Chittenden County","Chisago County","Chippewa County","Chilton County","Childress County","Chicot County","Chickasaw County","Cheyenne County","Chesterfield County","Chester County","Cheshire County","Chesapeake city","Cherry County","Cherokee County","Chenango County","Chemung County","Chelan County","Cheboygan County","Cheatham County","Chaves County","Chautauqua County","Chattooga County","Chattahoochee County","Chatham County","Chase County","Charlton County","Charlottesville city","Charlotte County","Charlevoix County","Charleston County","Charles Mix County","Charles County","Charles City County","Chariton County","Champaign County","Chambers County","Chaffee County","Cerro Gordo County","Centre County","Cedar County","Cecil County","Cayuga County","Cavalier County","Cattaraugus County","Catron County","Catoosa County","Catawba County","Catahoula Parish","Caswell County","Castro County","Cassia County","Cass County","Casey County","Cascade County","Carver County","Carteret County","Carter County","Carson County","Carson City","Carroll County","Caroline County","Carlton County","Carlisle County","Caribou County","Carbon County","Cape May County","Cape Girardeau County","Canyon County","Cannon County","Candler County","Canadian County","Campbell County","Camp County","Cameron Parish","Cameron County","Camden County","Cambria 
County","Camas County","Calvert County","Calumet County","Calloway County","Callaway County","Callahan County","Calhoun County","Caledonia County","Caldwell Parish","Caldwell County","Calcasieu Parish","Calaveras County","Caddo Parish","Caddo County","Cache County","Cabell County","Cabarrus County","Butts County","Butte County","Butler County","Burt County","Burnett County","Burnet County","Burlington County","Burleson County","Burleigh County","Burke County","Bureau County","Buncombe County","Bullock County","Bulloch County","Bullitt County","Buffalo County","Buena Vista County","Buena Vista city","Bucks County","Buckingham County","Buchanan County","Bryan County","Brunswick County","Brule County","Brown County","Broward County","Broome County","Brooks County","Brookings County","Brooke County","Bronx County","Broadwater County","Bristol County","Bristol city","Bristol Bay Borough","Briscoe County","Brewster County","Brevard County","Bremer County","Breckinridge County","Breathitt County","Brazos County","Brazoria County","Braxton County","Brantley County","Branch County","Bradley County","Bradford County","Bracken County","Boyle County","Boyd County","Box Elder County","Box Butte County","Bowman County","Bowie County","Bourbon County","Boundary County","Boulder County","Bottineau County","Botetourt County","Bossier Parish","Bosque County","Borden County","Boone County","Bonneville County","Bonner County","Bond County","Bon Homme County","Bollinger County","Bolivar County","Boise County","Blue Earth County","Blount County","Bledsoe County","Bleckley County","Bland County","Blanco County","Blair County","Blaine County","Bladen County","Blackford County","Black Hawk County","Bingham County","Billings County","Big Stone County","Big Horn County","Bienville Parish","Bibb County","Bexar County","Bethel Census Area","Bertie County","Berrien County","Bernalillo County","Berkshire County","Berks County","Berkeley County","Bergen County","Benzie County","Benton County","Bent County","Benson County","Bennington County","Bennett County","Benewah County","Ben Hill County","Beltrami County","Belmont County","Bell County","Belknap County","Bee County","Bedford County","Bedford city","Beckham County","Becker County","Beaverhead County","Beaver County","Beauregard Parish","Beaufort County","Bear Lake County","Beadle County","Baylor County","Bayfield County","Bay County","Baxter County","Bath County","Bates County","Bastrop County","Bartow County","Barton County","Bartholomew County","Barry County","Barrow County","Barron County","Barren County","Barnwell County","Barnstable County","Barnes County","Barbour County","Barber County","Baraga County","Bannock County","Banner County","Banks County","Bandera County","Bamberg County","Baltimore County","Baltimore city","Ballard County","Baldwin County","Baker County","Bailey County","Bacon County","Baca County","Avoyelles Parish","Avery County","Autauga County","Austin County","Aurora County","Augusta County","Auglaize County","Audubon County","Audrain County","Attala County","Atoka County","Atlantic County","Atkinson County","Athens County","Atchison County","Atascosa County","Assumption Parish","Asotin County","Ashtabula County","Ashley County","Ashland County","Ashe County","Ascension Parish","Arthur County","Aroostook County","Armstrong County","Arlington County","Arkansas County","Arenac County","Archuleta County","Archer County","Arapahoe County","Aransas County","Appomattox County","Appling County","Appanoose County","Apache County","Antrim 
County","Antelope County","Anson County","Anoka County","Anne Arundel County","Angelina County","Androscoggin County","Andrews County","Andrew County","Anderson County","Anchorage Borough","Amite County","Amherst County","Amelia County","Amador County","Alpine County","Alpena County","Allendale County","Allen Parish","Allen County","Allegheny County","Alleghany County","Allegany County","Allegan County","Allamakee County","Alger County","Alfalfa County","Alexandria city","Alexander County","Aleutians West Census Area","Aleutians East Borough","Alcorn County","Alcona County","Albemarle County","Albany County","Alamosa County","Alameda County","Alamance County","Alachua County","Aitkin County","Aiken County","Addison County","Adams County","Adair County","Ada County","Accomack County","Acadia Parish","Abbeville County"] + }, + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q11", + 'query': """ with year_total as ( + select c_customer_id customer_id + ,c_first_name customer_first_name + ,c_last_name customer_last_name + ,c_preferred_cust_flag customer_preferred_cust_flag + ,c_birth_country customer_birth_country + ,c_login customer_login + ,c_email_address customer_email_address + ,d_year dyear + ,sum(ss_ext_list_price-ss_ext_discount_amt) year_total + ,'s' sale_type + from customer + ,store_sales + ,date_dim + where c_customer_sk = ss_customer_sk + and ss_sold_date_sk = d_date_sk + group by c_customer_id + ,c_first_name + ,c_last_name + ,c_preferred_cust_flag + ,c_birth_country + ,c_login + ,c_email_address + ,d_year + union all + select c_customer_id customer_id + ,c_first_name customer_first_name + ,c_last_name customer_last_name + ,c_preferred_cust_flag customer_preferred_cust_flag + ,c_birth_country customer_birth_country + ,c_login customer_login + ,c_email_address customer_email_address + ,d_year dyear + ,sum(ws_ext_list_price-ws_ext_discount_amt) year_total + ,'w' sale_type + from customer + ,web_sales + ,date_dim + where c_customer_sk = ws_bill_customer_sk + and ws_sold_date_sk = d_date_sk + group by c_customer_id + ,c_first_name + ,c_last_name + ,c_preferred_cust_flag + ,c_birth_country + ,c_login + ,c_email_address + ,d_year + ) + select + t_s_secyear.customer_id + ,t_s_secyear.customer_first_name + ,t_s_secyear.customer_last_name + ,{SELECTONE} + from year_total t_s_firstyear + ,year_total t_s_secyear + ,year_total t_w_firstyear + ,year_total t_w_secyear + where t_s_secyear.customer_id = t_s_firstyear.customer_id + and t_s_firstyear.customer_id = t_w_secyear.customer_id + and t_s_firstyear.customer_id = t_w_firstyear.customer_id + and t_s_firstyear.sale_type = 's' + and t_w_firstyear.sale_type = 'w' + and t_s_secyear.sale_type = 's' + and t_w_secyear.sale_type = 'w' + and t_s_firstyear.dyear = {YEAR} + and t_s_secyear.dyear = {YEAR}+1 + and t_w_firstyear.dyear = {YEAR} + and t_w_secyear.dyear = {YEAR}+1 + and t_s_firstyear.year_total > 0 + and t_w_firstyear.year_total > 0 + and case when t_w_firstyear.year_total > 0 then t_w_secyear.year_total / t_w_firstyear.year_total else 0.0 end + > case when t_s_firstyear.year_total > 0 then t_s_secyear.year_total / t_s_firstyear.year_total else 0.0 end + order by t_s_secyear.customer_id + ,t_s_secyear.customer_first_name + ,t_s_secyear.customer_last_name + ,{SELECTONE} + limit 100""", + 'parameter': + { + 
'YEAR': { + 'type': "integer", + 'range': [1998,2001] + }, + 'SELECTONE': { + 'type': "list", + 'range': ["t_s_secyear.customer_preferred_cust_flag","t_s_secyear.customer_birth_country","t_s_secyear.customer_login","t_s_secyear.customer_email_address"] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q12", + 'query': """select i_item_id + ,i_item_desc + ,i_category + ,i_class + ,i_current_price + ,sum(ws_ext_sales_price) as itemrevenue + ,sum(ws_ext_sales_price)*100/sum(sum(ws_ext_sales_price)) over + (partition by i_class) as revenueratio + from + web_sales + ,item + ,date_dim + where + ws_item_sk = i_item_sk + and i_category in ('{CATEGORY1}', '{CATEGORY2}', '{CATEGORY3}') + and ws_sold_date_sk = d_date_sk + and d_date between cast('1998-01-01' as date) + and (cast('1998-01-01' as date) + interval '30' day) + group by + i_item_id + ,i_item_desc + ,i_category + ,i_class + ,i_current_price + order by + i_category is not null, i_category + ,i_class is not null, i_class + ,i_item_id is not null, i_item_id + ,i_item_desc + ,revenueratio + limit 100""", + 'parameter': + { + 'CATEGORY': { + 'type': "list", + 'size': 3, + 'range': ["Books","Children","Electronics","Home","Jewelry","Men","Music","Shoes","Sports","Women"] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q13", + 'query': """select avg(ss_quantity) avg_ss_quantity + ,avg(ss_ext_sales_price) avg_ss_ext_sales_price + ,avg(ss_ext_wholesale_cost) avg_ss_ext_wholesale_cost + ,sum(ss_ext_wholesale_cost) sum_ss_ext_wholesale_cost + from store_sales + ,store + ,customer_demographics + ,household_demographics + ,customer_address + ,date_dim + where s_store_sk = ss_store_sk + and ss_sold_date_sk = d_date_sk and d_year = 2001 + and((ss_hdemo_sk=hd_demo_sk + and cd_demo_sk = ss_cdemo_sk + and cd_marital_status = '{MS1}' + and cd_education_status = '{ES1}' + and ss_sales_price between 100.00 and 150.00 + and hd_dep_count = 3 + )or + (ss_hdemo_sk=hd_demo_sk + and cd_demo_sk = ss_cdemo_sk + and cd_marital_status = '{MS2}' + and cd_education_status = '{ES2}' + and ss_sales_price between 50.00 and 100.00 + and hd_dep_count = 1 + ) or + (ss_hdemo_sk=hd_demo_sk + and cd_demo_sk = ss_cdemo_sk + and cd_marital_status = '{MS3}' + and cd_education_status = '{ES3}' + and ss_sales_price between 150.00 and 200.00 + and hd_dep_count = 1 + )) + and((ss_addr_sk = ca_address_sk + and ca_country = 'United States' + and ca_state in ('{STATE1}', '{STATE2}', '{STATE3}') + and ss_net_profit between 100 and 200 + ) or + (ss_addr_sk = ca_address_sk + and ca_country = 'United States' + and ca_state in ('{STATE4}', '{STATE5}', '{STATE6}') + and ss_net_profit between 150 and 300 + ) or + (ss_addr_sk = ca_address_sk + and ca_country = 'United States' + and ca_state in ('{STATE7}', '{STATE8}', '{STATE9}') + and ss_net_profit between 50 and 250 + )) + """, + 'parameter': + { + 'STATE': { + 'type': "list", + 'size': 9, + 'range': 
["AK","AL","AR","AZ","CA","CO","CT","DE","FL","GA","HI","IA","ID","IL","IN","KS","KY","LA","ME","MI","MN","MO","MS","MT","NC","ND","NE","NJ","NM","NV","OH","OK","OR","PA","SC","SD","TN","TX","UT","VA","WA","WI","WV"] + }, + 'MS': { + 'type': "list", + 'size': 3, + 'range': ["M","S","D","W","U"] + }, + 'ES': { + 'type': "list", + 'size': 3, + 'range': ["Primary","Secondary","College","2 yr Degree","4 yr Degree", "Advanced Degree","Unknown"] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q14a+b", + 'query': ["""with cross_items as + (select i_item_sk ss_item_sk + from item, + (select iss.i_brand_id brand_id + ,iss.i_class_id class_id + ,iss.i_category_id category_id + from store_sales + ,item iss + ,date_dim d1 + where ss_item_sk = iss.i_item_sk + and ss_sold_date_sk = d1.d_date_sk + and d1.d_year between {YEAR} AND {YEAR} + 2 + intersect + select ics.i_brand_id + ,ics.i_class_id + ,ics.i_category_id + from catalog_sales + ,item ics + ,date_dim d2 + where cs_item_sk = ics.i_item_sk + and cs_sold_date_sk = d2.d_date_sk + and d2.d_year between {YEAR} AND {YEAR} + 2 + intersect + select iws.i_brand_id + ,iws.i_class_id + ,iws.i_category_id + from web_sales + ,item iws + ,date_dim d3 + where ws_item_sk = iws.i_item_sk + and ws_sold_date_sk = d3.d_date_sk + and d3.d_year between {YEAR} AND {YEAR} + 2) z + where i_brand_id = brand_id + and i_class_id = class_id + and i_category_id = category_id + ), + avg_sales as + (select avg(quantity*list_price) average_sales + from (select ss_quantity quantity + ,ss_list_price list_price + from store_sales + ,date_dim + where ss_sold_date_sk = d_date_sk + and d_year between {YEAR} and {YEAR} + 2 + union all + select cs_quantity quantity + ,cs_list_price list_price + from catalog_sales + ,date_dim + where cs_sold_date_sk = d_date_sk + and d_year between {YEAR} and {YEAR} + 2 + union all + select ws_quantity quantity + ,ws_list_price list_price + from web_sales + ,date_dim + where ws_sold_date_sk = d_date_sk + and d_year between {YEAR} and {YEAR} + 2) x) + select channel, i_brand_id,i_class_id,i_category_id,sum(sales), sum(number_sales) + from( + select 'store' channel, i_brand_id,i_class_id + ,i_category_id,sum(ss_quantity*ss_list_price) sales + , count(*) number_sales + from store_sales + ,item + ,date_dim + where ss_item_sk in (select ss_item_sk from cross_items) + and ss_item_sk = i_item_sk + and ss_sold_date_sk = d_date_sk + and d_year = {YEAR}+2 + and d_moy = 11 + group by i_brand_id,i_class_id,i_category_id + having sum(ss_quantity*ss_list_price) > (select average_sales from avg_sales) + union all + select 'catalog' channel, i_brand_id,i_class_id,i_category_id, sum(cs_quantity*cs_list_price) sales, count(*) number_sales + from catalog_sales + ,item + ,date_dim + where cs_item_sk in (select ss_item_sk from cross_items) + and cs_item_sk = i_item_sk + and cs_sold_date_sk = d_date_sk + and d_year = {YEAR}+2 + and d_moy = 11 + group by i_brand_id,i_class_id,i_category_id + having sum(cs_quantity*cs_list_price) > (select average_sales from avg_sales) + union all + select 'web' channel, i_brand_id,i_class_id,i_category_id, sum(ws_quantity*ws_list_price) sales , count(*) number_sales + from web_sales + ,item + ,date_dim + where ws_item_sk in (select ss_item_sk from cross_items) + and ws_item_sk = i_item_sk + and 
ws_sold_date_sk = d_date_sk + and d_year = {YEAR}+2 + and d_moy = 11 + group by i_brand_id,i_class_id,i_category_id + having sum(ws_quantity*ws_list_price) > (select average_sales from avg_sales) + ) y + group by channel, i_brand_id,i_class_id,i_category_id with rollup + order by channel,i_brand_id,i_class_id,i_category_id + limit 100""", """ with cross_items as + (select i_item_sk ss_item_sk + from item, + (select iss.i_brand_id brand_id + ,iss.i_class_id class_id + ,iss.i_category_id category_id + from store_sales + ,item iss + ,date_dim d1 + where ss_item_sk = iss.i_item_sk + and ss_sold_date_sk = d1.d_date_sk + and d1.d_year between {YEAR} AND {YEAR} + 2 + intersect + select ics.i_brand_id + ,ics.i_class_id + ,ics.i_category_id + from catalog_sales + ,item ics + ,date_dim d2 + where cs_item_sk = ics.i_item_sk + and cs_sold_date_sk = d2.d_date_sk + and d2.d_year between {YEAR} AND {YEAR} + 2 + intersect + select iws.i_brand_id + ,iws.i_class_id + ,iws.i_category_id + from web_sales + ,item iws + ,date_dim d3 + where ws_item_sk = iws.i_item_sk + and ws_sold_date_sk = d3.d_date_sk + and d3.d_year between {YEAR} AND {YEAR} + 2) x + where i_brand_id = brand_id + and i_class_id = class_id + and i_category_id = category_id + ), + avg_sales as + (select avg(quantity*list_price) average_sales + from (select ss_quantity quantity + ,ss_list_price list_price + from store_sales + ,date_dim + where ss_sold_date_sk = d_date_sk + and d_year between {YEAR} and {YEAR} + 2 + union all + select cs_quantity quantity + ,cs_list_price list_price + from catalog_sales + ,date_dim + where cs_sold_date_sk = d_date_sk + and d_year between {YEAR} and {YEAR} + 2 + union all + select ws_quantity quantity + ,ws_list_price list_price + from web_sales + ,date_dim + where ws_sold_date_sk = d_date_sk + and d_year between {YEAR} and {YEAR} + 2) x) + select this_year.channel ty_channel + ,this_year.i_brand_id ty_brand + ,this_year.i_class_id ty_class + ,this_year.i_category_id ty_category + ,this_year.sales ty_sales + ,this_year.number_sales ty_number_sales + ,last_year.channel ly_channel + ,last_year.i_brand_id ly_brand + ,last_year.i_class_id ly_class + ,last_year.i_category_id ly_category + ,last_year.sales ly_sales + ,last_year.number_sales ly_number_sales + from + (select 'store' channel, i_brand_id,i_class_id,i_category_id + ,sum(ss_quantity*ss_list_price) sales, count(*) number_sales + from store_sales + ,item + ,date_dim + where ss_item_sk in (select ss_item_sk from cross_items) + and ss_item_sk = i_item_sk + and ss_sold_date_sk = d_date_sk + and d_week_seq = (select d_week_seq + from date_dim + where d_year = {YEAR} + 1 + and d_moy = 12 + and d_dom = {DAY}) + group by i_brand_id,i_class_id,i_category_id + having sum(ss_quantity*ss_list_price) > (select average_sales from avg_sales)) this_year, + (select 'store' channel, i_brand_id,i_class_id + ,i_category_id, sum(ss_quantity*ss_list_price) sales, count(*) number_sales + from store_sales + ,item + ,date_dim + where ss_item_sk in (select ss_item_sk from cross_items) + and ss_item_sk = i_item_sk + and ss_sold_date_sk = d_date_sk + and d_week_seq = (select d_week_seq + from date_dim + where d_year = {YEAR} + and d_moy = 12 + and d_dom = {DAY}) + group by i_brand_id,i_class_id,i_category_id + having sum(ss_quantity*ss_list_price) > (select average_sales from avg_sales)) last_year + where this_year.i_brand_id= last_year.i_brand_id + and this_year.i_class_id = last_year.i_class_id + and this_year.i_category_id = last_year.i_category_id + order by this_year.channel, 
this_year.i_brand_id, this_year.i_class_id, this_year.i_category_id + limit 100"""], + 'DBMS': { + 'MariaDB': + ["""with cross_items as + (select i_item_sk ss_item_sk + from item, + (select iss.i_brand_id brand_id + ,iss.i_class_id class_id + ,iss.i_category_id category_id + from store_sales + ,item iss + ,date_dim d1 + where ss_item_sk = iss.i_item_sk + and ss_sold_date_sk = d1.d_date_sk + and d1.d_year between {YEAR} AND {YEAR} + 2 + intersect + select ics.i_brand_id + ,ics.i_class_id + ,ics.i_category_id + from catalog_sales + ,item ics + ,date_dim d2 + where cs_item_sk = ics.i_item_sk + and cs_sold_date_sk = d2.d_date_sk + and d2.d_year between {YEAR} AND {YEAR} + 2 + intersect + select iws.i_brand_id + ,iws.i_class_id + ,iws.i_category_id + from web_sales + ,item iws + ,date_dim d3 + where ws_item_sk = iws.i_item_sk + and ws_sold_date_sk = d3.d_date_sk + and d3.d_year between {YEAR} AND {YEAR} + 2) z + where i_brand_id = brand_id + and i_class_id = class_id + and i_category_id = category_id + ), + avg_sales as + (select avg(quantity*list_price) average_sales + from (select ss_quantity quantity + ,ss_list_price list_price + from store_sales + ,date_dim + where ss_sold_date_sk = d_date_sk + and d_year between {YEAR} and {YEAR} + 2 + union all + select cs_quantity quantity + ,cs_list_price list_price + from catalog_sales + ,date_dim + where cs_sold_date_sk = d_date_sk + and d_year between {YEAR} and {YEAR} + 2 + union all + select ws_quantity quantity + ,ws_list_price list_price + from web_sales + ,date_dim + where ws_sold_date_sk = d_date_sk + and d_year between {YEAR} and {YEAR} + 2) x) + select channel, i_brand_id,i_class_id,i_category_id,sum(sales), sum(number_sales) + from( + select 'store' channel, i_brand_id,i_class_id + ,i_category_id,sum(ss_quantity*ss_list_price) sales + , count(*) number_sales + from store_sales + ,item + ,date_dim + where ss_item_sk in (select ss_item_sk from cross_items) + and ss_item_sk = i_item_sk + and ss_sold_date_sk = d_date_sk + and d_year = {YEAR}+2 + and d_moy = 11 + group by i_brand_id,i_class_id,i_category_id + having sum(ss_quantity*ss_list_price) > (select average_sales from avg_sales) + union all + select 'catalog' channel, i_brand_id,i_class_id,i_category_id, sum(cs_quantity*cs_list_price) sales, count(*) number_sales + from catalog_sales + ,item + ,date_dim + where cs_item_sk in (select ss_item_sk from cross_items) + and cs_item_sk = i_item_sk + and cs_sold_date_sk = d_date_sk + and d_year = {YEAR}+2 + and d_moy = 11 + group by i_brand_id,i_class_id,i_category_id + having sum(cs_quantity*cs_list_price) > (select average_sales from avg_sales) + union all + select 'web' channel, i_brand_id,i_class_id,i_category_id, sum(ws_quantity*ws_list_price) sales , count(*) number_sales + from web_sales + ,item + ,date_dim + where ws_item_sk in (select ss_item_sk from cross_items) + and ws_item_sk = i_item_sk + and ws_sold_date_sk = d_date_sk + and d_year = {YEAR}+2 + and d_moy = 11 + group by i_brand_id,i_class_id,i_category_id + having sum(ws_quantity*ws_list_price) > (select average_sales from avg_sales) + ) y + group by channel, i_brand_id,i_class_id,i_category_id with rollup + limit 100""", """ with cross_items as + (select i_item_sk ss_item_sk + from item, + (select iss.i_brand_id brand_id + ,iss.i_class_id class_id + ,iss.i_category_id category_id + from store_sales + ,item iss + ,date_dim d1 + where ss_item_sk = iss.i_item_sk + and ss_sold_date_sk = d1.d_date_sk + and d1.d_year between {YEAR} AND {YEAR} + 2 + intersect + select ics.i_brand_id + 
,ics.i_class_id + ,ics.i_category_id + from catalog_sales + ,item ics + ,date_dim d2 + where cs_item_sk = ics.i_item_sk + and cs_sold_date_sk = d2.d_date_sk + and d2.d_year between {YEAR} AND {YEAR} + 2 + intersect + select iws.i_brand_id + ,iws.i_class_id + ,iws.i_category_id + from web_sales + ,item iws + ,date_dim d3 + where ws_item_sk = iws.i_item_sk + and ws_sold_date_sk = d3.d_date_sk + and d3.d_year between {YEAR} AND {YEAR} + 2) x + where i_brand_id = brand_id + and i_class_id = class_id + and i_category_id = category_id + ), + avg_sales as + (select avg(quantity*list_price) average_sales + from (select ss_quantity quantity + ,ss_list_price list_price + from store_sales + ,date_dim + where ss_sold_date_sk = d_date_sk + and d_year between {YEAR} and {YEAR} + 2 + union all + select cs_quantity quantity + ,cs_list_price list_price + from catalog_sales + ,date_dim + where cs_sold_date_sk = d_date_sk + and d_year between {YEAR} and {YEAR} + 2 + union all + select ws_quantity quantity + ,ws_list_price list_price + from web_sales + ,date_dim + where ws_sold_date_sk = d_date_sk + and d_year between {YEAR} and {YEAR} + 2) x) + select this_year.channel ty_channel + ,this_year.i_brand_id ty_brand + ,this_year.i_class_id ty_class + ,this_year.i_category_id ty_category + ,this_year.sales ty_sales + ,this_year.number_sales ty_number_sales + ,last_year.channel ly_channel + ,last_year.i_brand_id ly_brand + ,last_year.i_class_id ly_class + ,last_year.i_category_id ly_category + ,last_year.sales ly_sales + ,last_year.number_sales ly_number_sales + from + (select 'store' channel, i_brand_id,i_class_id,i_category_id + ,sum(ss_quantity*ss_list_price) sales, count(*) number_sales + from store_sales + ,item + ,date_dim + where ss_item_sk in (select ss_item_sk from cross_items) + and ss_item_sk = i_item_sk + and ss_sold_date_sk = d_date_sk + and d_week_seq = (select d_week_seq + from date_dim + where d_year = {YEAR} + 1 + and d_moy = 12 + and d_dom = {DAY}) + group by i_brand_id,i_class_id,i_category_id + having sum(ss_quantity*ss_list_price) > (select average_sales from avg_sales)) this_year, + (select 'store' channel, i_brand_id,i_class_id + ,i_category_id, sum(ss_quantity*ss_list_price) sales, count(*) number_sales + from store_sales + ,item + ,date_dim + where ss_item_sk in (select ss_item_sk from cross_items) + and ss_item_sk = i_item_sk + and ss_sold_date_sk = d_date_sk + and d_week_seq = (select d_week_seq + from date_dim + where d_year = {YEAR} + and d_moy = 12 + and d_dom = {DAY}) + group by i_brand_id,i_class_id,i_category_id + having sum(ss_quantity*ss_list_price) > (select average_sales from avg_sales)) last_year + where this_year.i_brand_id= last_year.i_brand_id + and this_year.i_class_id = last_year.i_class_id + and this_year.i_category_id = last_year.i_category_id + order by this_year.channel, this_year.i_brand_id, this_year.i_class_id, this_year.i_category_id + limit 100"""], + 'MonetDB': + ["""with cross_items as + (select i_item_sk ss_item_sk + from item, + (select iss.i_brand_id brand_id + ,iss.i_class_id class_id + ,iss.i_category_id category_id + from store_sales + ,item iss + ,date_dim d1 + where ss_item_sk = iss.i_item_sk + and ss_sold_date_sk = d1.d_date_sk + and d1.d_year between {YEAR} AND {YEAR} + 2 + intersect + select ics.i_brand_id + ,ics.i_class_id + ,ics.i_category_id + from catalog_sales + ,item ics + ,date_dim d2 + where cs_item_sk = ics.i_item_sk + and cs_sold_date_sk = d2.d_date_sk + and d2.d_year between {YEAR} AND {YEAR} + 2 + intersect + select iws.i_brand_id + 
,iws.i_class_id + ,iws.i_category_id + from web_sales + ,item iws + ,date_dim d3 + where ws_item_sk = iws.i_item_sk + and ws_sold_date_sk = d3.d_date_sk + and d3.d_year between {YEAR} AND {YEAR} + 2) z + where i_brand_id = brand_id + and i_class_id = class_id + and i_category_id = category_id + ), + avg_sales as + (select avg(quantity*list_price) average_sales + from (select ss_quantity quantity + ,ss_list_price list_price + from store_sales + ,date_dim + where ss_sold_date_sk = d_date_sk + and d_year between {YEAR} and {YEAR} + 2 + union all + select cs_quantity quantity + ,cs_list_price list_price + from catalog_sales + ,date_dim + where cs_sold_date_sk = d_date_sk + and d_year between {YEAR} and {YEAR} + 2 + union all + select ws_quantity quantity + ,ws_list_price list_price + from web_sales + ,date_dim + where ws_sold_date_sk = d_date_sk + and d_year between {YEAR} and {YEAR} + 2) x) + select channel, i_brand_id,i_class_id,i_category_id,sum(sales), sum(number_sales) + from( + select 'store' channel, i_brand_id,i_class_id + ,i_category_id,sum(ss_quantity*ss_list_price) sales + , count(*) number_sales + from store_sales + ,item + ,date_dim + where ss_item_sk in (select ss_item_sk from cross_items) + and ss_item_sk = i_item_sk + and ss_sold_date_sk = d_date_sk + and d_year = {YEAR}+2 + and d_moy = 11 + group by i_brand_id,i_class_id,i_category_id + having sum(ss_quantity*ss_list_price) > (select average_sales from avg_sales) + union all + select 'catalog' channel, i_brand_id,i_class_id,i_category_id, sum(cs_quantity*cs_list_price) sales, count(*) number_sales + from catalog_sales + ,item + ,date_dim + where cs_item_sk in (select ss_item_sk from cross_items) + and cs_item_sk = i_item_sk + and cs_sold_date_sk = d_date_sk + and d_year = {YEAR}+2 + and d_moy = 11 + group by i_brand_id,i_class_id,i_category_id + having sum(cs_quantity*cs_list_price) > (select average_sales from avg_sales) + union all + select 'web' channel, i_brand_id,i_class_id,i_category_id, sum(ws_quantity*ws_list_price) sales , count(*) number_sales + from web_sales + ,item + ,date_dim + where ws_item_sk in (select ss_item_sk from cross_items) + and ws_item_sk = i_item_sk + and ws_sold_date_sk = d_date_sk + and d_year = {YEAR}+2 + and d_moy = 11 + group by i_brand_id,i_class_id,i_category_id + having sum(ws_quantity*ws_list_price) > (select average_sales from avg_sales) + ) y + group by rollup(channel, i_brand_id,i_class_id,i_category_id) + order by channel,i_brand_id,i_class_id,i_category_id + limit 100""", """ with cross_items as + (select i_item_sk ss_item_sk + from item, + (select iss.i_brand_id brand_id + ,iss.i_class_id class_id + ,iss.i_category_id category_id + from store_sales + ,item iss + ,date_dim d1 + where ss_item_sk = iss.i_item_sk + and ss_sold_date_sk = d1.d_date_sk + and d1.d_year between {YEAR} AND {YEAR} + 2 + intersect + select ics.i_brand_id + ,ics.i_class_id + ,ics.i_category_id + from catalog_sales + ,item ics + ,date_dim d2 + where cs_item_sk = ics.i_item_sk + and cs_sold_date_sk = d2.d_date_sk + and d2.d_year between {YEAR} AND {YEAR} + 2 + intersect + select iws.i_brand_id + ,iws.i_class_id + ,iws.i_category_id + from web_sales + ,item iws + ,date_dim d3 + where ws_item_sk = iws.i_item_sk + and ws_sold_date_sk = d3.d_date_sk + and d3.d_year between {YEAR} AND {YEAR} + 2) x + where i_brand_id = brand_id + and i_class_id = class_id + and i_category_id = category_id + ), + avg_sales as + (select avg(quantity*list_price) average_sales + from (select ss_quantity quantity + ,ss_list_price list_price + 
from store_sales + ,date_dim + where ss_sold_date_sk = d_date_sk + and d_year between {YEAR} and {YEAR} + 2 + union all + select cs_quantity quantity + ,cs_list_price list_price + from catalog_sales + ,date_dim + where cs_sold_date_sk = d_date_sk + and d_year between {YEAR} and {YEAR} + 2 + union all + select ws_quantity quantity + ,ws_list_price list_price + from web_sales + ,date_dim + where ws_sold_date_sk = d_date_sk + and d_year between {YEAR} and {YEAR} + 2) x) + select this_year.channel ty_channel + ,this_year.i_brand_id ty_brand + ,this_year.i_class_id ty_class + ,this_year.i_category_id ty_category + ,this_year.sales ty_sales + ,this_year.number_sales ty_number_sales + ,last_year.channel ly_channel + ,last_year.i_brand_id ly_brand + ,last_year.i_class_id ly_class + ,last_year.i_category_id ly_category + ,last_year.sales ly_sales + ,last_year.number_sales ly_number_sales + from + (select 'store' channel, i_brand_id,i_class_id,i_category_id + ,sum(ss_quantity*ss_list_price) sales, count(*) number_sales + from store_sales + ,item + ,date_dim + where ss_item_sk in (select ss_item_sk from cross_items) + and ss_item_sk = i_item_sk + and ss_sold_date_sk = d_date_sk + and d_week_seq = (select d_week_seq + from date_dim + where d_year = {YEAR} + 1 + and d_moy = 12 + and d_dom = {DAY}) + group by i_brand_id,i_class_id,i_category_id + having sum(ss_quantity*ss_list_price) > (select average_sales from avg_sales)) this_year, + (select 'store' channel, i_brand_id,i_class_id + ,i_category_id, sum(ss_quantity*ss_list_price) sales, count(*) number_sales + from store_sales + ,item + ,date_dim + where ss_item_sk in (select ss_item_sk from cross_items) + and ss_item_sk = i_item_sk + and ss_sold_date_sk = d_date_sk + and d_week_seq = (select d_week_seq + from date_dim + where d_year = {YEAR} + and d_moy = 12 + and d_dom = {DAY}) + group by i_brand_id,i_class_id,i_category_id + having sum(ss_quantity*ss_list_price) > (select average_sales from avg_sales)) last_year + where this_year.i_brand_id= last_year.i_brand_id + and this_year.i_class_id = last_year.i_class_id + and this_year.i_category_id = last_year.i_category_id + order by this_year.channel, this_year.i_brand_id, this_year.i_class_id, this_year.i_category_id + limit 100"""], + 'PostgreSQL': + ["""with cross_items as + (select i_item_sk ss_item_sk + from item, + (select iss.i_brand_id brand_id + ,iss.i_class_id class_id + ,iss.i_category_id category_id + from store_sales + ,item iss + ,date_dim d1 + where ss_item_sk = iss.i_item_sk + and ss_sold_date_sk = d1.d_date_sk + and d1.d_year between {YEAR} AND {YEAR} + 2 + intersect + select ics.i_brand_id + ,ics.i_class_id + ,ics.i_category_id + from catalog_sales + ,item ics + ,date_dim d2 + where cs_item_sk = ics.i_item_sk + and cs_sold_date_sk = d2.d_date_sk + and d2.d_year between {YEAR} AND {YEAR} + 2 + intersect + select iws.i_brand_id + ,iws.i_class_id + ,iws.i_category_id + from web_sales + ,item iws + ,date_dim d3 + where ws_item_sk = iws.i_item_sk + and ws_sold_date_sk = d3.d_date_sk + and d3.d_year between {YEAR} AND {YEAR} + 2) z + where i_brand_id = brand_id + and i_class_id = class_id + and i_category_id = category_id + ), + avg_sales as + (select avg(quantity*list_price) average_sales + from (select ss_quantity quantity + ,ss_list_price list_price + from store_sales + ,date_dim + where ss_sold_date_sk = d_date_sk + and d_year between {YEAR} and {YEAR} + 2 + union all + select cs_quantity quantity + ,cs_list_price list_price + from catalog_sales + ,date_dim + where cs_sold_date_sk = 
d_date_sk + and d_year between {YEAR} and {YEAR} + 2 + union all + select ws_quantity quantity + ,ws_list_price list_price + from web_sales + ,date_dim + where ws_sold_date_sk = d_date_sk + and d_year between {YEAR} and {YEAR} + 2) x) + select channel, i_brand_id,i_class_id,i_category_id,sum(sales), sum(number_sales) + from( + select 'store' channel, i_brand_id,i_class_id + ,i_category_id,sum(ss_quantity*ss_list_price) sales + , count(*) number_sales + from store_sales + ,item + ,date_dim + where ss_item_sk in (select ss_item_sk from cross_items) + and ss_item_sk = i_item_sk + and ss_sold_date_sk = d_date_sk + and d_year = {YEAR}+2 + and d_moy = 11 + group by i_brand_id,i_class_id,i_category_id + having sum(ss_quantity*ss_list_price) > (select average_sales from avg_sales) + union all + select 'catalog' channel, i_brand_id,i_class_id,i_category_id, sum(cs_quantity*cs_list_price) sales, count(*) number_sales + from catalog_sales + ,item + ,date_dim + where cs_item_sk in (select ss_item_sk from cross_items) + and cs_item_sk = i_item_sk + and cs_sold_date_sk = d_date_sk + and d_year = {YEAR}+2 + and d_moy = 11 + group by i_brand_id,i_class_id,i_category_id + having sum(cs_quantity*cs_list_price) > (select average_sales from avg_sales) + union all + select 'web' channel, i_brand_id,i_class_id,i_category_id, sum(ws_quantity*ws_list_price) sales , count(*) number_sales + from web_sales + ,item + ,date_dim + where ws_item_sk in (select ss_item_sk from cross_items) + and ws_item_sk = i_item_sk + and ws_sold_date_sk = d_date_sk + and d_year = {YEAR}+2 + and d_moy = 11 + group by i_brand_id,i_class_id,i_category_id + having sum(ws_quantity*ws_list_price) > (select average_sales from avg_sales) + ) y + group by rollup(channel, i_brand_id,i_class_id,i_category_id) + order by channel,i_brand_id,i_class_id,i_category_id + limit 100""", """ with cross_items as + (select i_item_sk ss_item_sk + from item, + (select iss.i_brand_id brand_id + ,iss.i_class_id class_id + ,iss.i_category_id category_id + from store_sales + ,item iss + ,date_dim d1 + where ss_item_sk = iss.i_item_sk + and ss_sold_date_sk = d1.d_date_sk + and d1.d_year between {YEAR} AND {YEAR} + 2 + intersect + select ics.i_brand_id + ,ics.i_class_id + ,ics.i_category_id + from catalog_sales + ,item ics + ,date_dim d2 + where cs_item_sk = ics.i_item_sk + and cs_sold_date_sk = d2.d_date_sk + and d2.d_year between {YEAR} AND {YEAR} + 2 + intersect + select iws.i_brand_id + ,iws.i_class_id + ,iws.i_category_id + from web_sales + ,item iws + ,date_dim d3 + where ws_item_sk = iws.i_item_sk + and ws_sold_date_sk = d3.d_date_sk + and d3.d_year between {YEAR} AND {YEAR} + 2) x + where i_brand_id = brand_id + and i_class_id = class_id + and i_category_id = category_id + ), + avg_sales as + (select avg(quantity*list_price) average_sales + from (select ss_quantity quantity + ,ss_list_price list_price + from store_sales + ,date_dim + where ss_sold_date_sk = d_date_sk + and d_year between {YEAR} and {YEAR} + 2 + union all + select cs_quantity quantity + ,cs_list_price list_price + from catalog_sales + ,date_dim + where cs_sold_date_sk = d_date_sk + and d_year between {YEAR} and {YEAR} + 2 + union all + select ws_quantity quantity + ,ws_list_price list_price + from web_sales + ,date_dim + where ws_sold_date_sk = d_date_sk + and d_year between {YEAR} and {YEAR} + 2) x) + select this_year.channel ty_channel + ,this_year.i_brand_id ty_brand + ,this_year.i_class_id ty_class + ,this_year.i_category_id ty_category + ,this_year.sales ty_sales + 
,this_year.number_sales ty_number_sales + ,last_year.channel ly_channel + ,last_year.i_brand_id ly_brand + ,last_year.i_class_id ly_class + ,last_year.i_category_id ly_category + ,last_year.sales ly_sales + ,last_year.number_sales ly_number_sales + from + (select 'store' channel, i_brand_id,i_class_id,i_category_id + ,sum(ss_quantity*ss_list_price) sales, count(*) number_sales + from store_sales + ,item + ,date_dim + where ss_item_sk in (select ss_item_sk from cross_items) + and ss_item_sk = i_item_sk + and ss_sold_date_sk = d_date_sk + and d_week_seq = (select d_week_seq + from date_dim + where d_year = {YEAR} + 1 + and d_moy = 12 + and d_dom = {DAY}) + group by i_brand_id,i_class_id,i_category_id + having sum(ss_quantity*ss_list_price) > (select average_sales from avg_sales)) this_year, + (select 'store' channel, i_brand_id,i_class_id + ,i_category_id, sum(ss_quantity*ss_list_price) sales, count(*) number_sales + from store_sales + ,item + ,date_dim + where ss_item_sk in (select ss_item_sk from cross_items) + and ss_item_sk = i_item_sk + and ss_sold_date_sk = d_date_sk + and d_week_seq = (select d_week_seq + from date_dim + where d_year = {YEAR} + and d_moy = 12 + and d_dom = {DAY}) + group by i_brand_id,i_class_id,i_category_id + having sum(ss_quantity*ss_list_price) > (select average_sales from avg_sales)) last_year + where this_year.i_brand_id= last_year.i_brand_id + and this_year.i_class_id = last_year.i_class_id + and this_year.i_category_id = last_year.i_category_id + order by this_year.channel, this_year.i_brand_id, this_year.i_class_id, this_year.i_category_id + limit 100"""], + 'Exasol': + ["""with cross_items as + (select i_item_sk ss_item_sk + from item, + (select iss.i_brand_id brand_id + ,iss.i_class_id class_id + ,iss.i_category_id category_id + from store_sales + ,item iss + ,date_dim d1 + where ss_item_sk = iss.i_item_sk + and ss_sold_date_sk = d1.d_date_sk + and d1.d_year between {YEAR} AND {YEAR} + 2 + intersect + select ics.i_brand_id + ,ics.i_class_id + ,ics.i_category_id + from catalog_sales + ,item ics + ,date_dim d2 + where cs_item_sk = ics.i_item_sk + and cs_sold_date_sk = d2.d_date_sk + and d2.d_year between {YEAR} AND {YEAR} + 2 + intersect + select iws.i_brand_id + ,iws.i_class_id + ,iws.i_category_id + from web_sales + ,item iws + ,date_dim d3 + where ws_item_sk = iws.i_item_sk + and ws_sold_date_sk = d3.d_date_sk + and d3.d_year between {YEAR} AND {YEAR} + 2) z + where i_brand_id = brand_id + and i_class_id = class_id + and i_category_id = category_id + ), + avg_sales as + (select avg(quantity*list_price) average_sales + from (select ss_quantity quantity + ,ss_list_price list_price + from store_sales + ,date_dim + where ss_sold_date_sk = d_date_sk + and d_year between {YEAR} and {YEAR} + 2 + union all + select cs_quantity quantity + ,cs_list_price list_price + from catalog_sales + ,date_dim + where cs_sold_date_sk = d_date_sk + and d_year between {YEAR} and {YEAR} + 2 + union all + select ws_quantity quantity + ,ws_list_price list_price + from web_sales + ,date_dim + where ws_sold_date_sk = d_date_sk + and d_year between {YEAR} and {YEAR} + 2) x) + select channel, i_brand_id,i_class_id,i_category_id,sum(sales), sum(number_sales) + from( + select 'store' channel, i_brand_id,i_class_id + ,i_category_id,sum(ss_quantity*ss_list_price) sales + , count(*) number_sales + from store_sales + ,item + ,date_dim + where ss_item_sk in (select ss_item_sk from cross_items) + and ss_item_sk = i_item_sk + and ss_sold_date_sk = d_date_sk + and d_year = {YEAR}+2 + and d_moy = 
11 + group by i_brand_id,i_class_id,i_category_id + having sum(ss_quantity*ss_list_price) > (select average_sales from avg_sales) + union all + select 'catalog' channel, i_brand_id,i_class_id,i_category_id, sum(cs_quantity*cs_list_price) sales, count(*) number_sales + from catalog_sales + ,item + ,date_dim + where cs_item_sk in (select ss_item_sk from cross_items) + and cs_item_sk = i_item_sk + and cs_sold_date_sk = d_date_sk + and d_year = {YEAR}+2 + and d_moy = 11 + group by i_brand_id,i_class_id,i_category_id + having sum(cs_quantity*cs_list_price) > (select average_sales from avg_sales) + union all + select 'web' channel, i_brand_id,i_class_id,i_category_id, sum(ws_quantity*ws_list_price) sales , count(*) number_sales + from web_sales + ,item + ,date_dim + where ws_item_sk in (select ss_item_sk from cross_items) + and ws_item_sk = i_item_sk + and ws_sold_date_sk = d_date_sk + and d_year = {YEAR}+2 + and d_moy = 11 + group by i_brand_id,i_class_id,i_category_id + having sum(ws_quantity*ws_list_price) > (select average_sales from avg_sales) + ) y + group by rollup(channel, i_brand_id,i_class_id,i_category_id) + order by channel,i_brand_id,i_class_id,i_category_id + limit 100""", """ with cross_items as + (select i_item_sk ss_item_sk + from item, + (select iss.i_brand_id brand_id + ,iss.i_class_id class_id + ,iss.i_category_id category_id + from store_sales + ,item iss + ,date_dim d1 + where ss_item_sk = iss.i_item_sk + and ss_sold_date_sk = d1.d_date_sk + and d1.d_year between {YEAR} AND {YEAR} + 2 + intersect + select ics.i_brand_id + ,ics.i_class_id + ,ics.i_category_id + from catalog_sales + ,item ics + ,date_dim d2 + where cs_item_sk = ics.i_item_sk + and cs_sold_date_sk = d2.d_date_sk + and d2.d_year between {YEAR} AND {YEAR} + 2 + intersect + select iws.i_brand_id + ,iws.i_class_id + ,iws.i_category_id + from web_sales + ,item iws + ,date_dim d3 + where ws_item_sk = iws.i_item_sk + and ws_sold_date_sk = d3.d_date_sk + and d3.d_year between {YEAR} AND {YEAR} + 2) x + where i_brand_id = brand_id + and i_class_id = class_id + and i_category_id = category_id + ), + avg_sales as + (select avg(quantity*list_price) average_sales + from (select ss_quantity quantity + ,ss_list_price list_price + from store_sales + ,date_dim + where ss_sold_date_sk = d_date_sk + and d_year between {YEAR} and {YEAR} + 2 + union all + select cs_quantity quantity + ,cs_list_price list_price + from catalog_sales + ,date_dim + where cs_sold_date_sk = d_date_sk + and d_year between {YEAR} and {YEAR} + 2 + union all + select ws_quantity quantity + ,ws_list_price list_price + from web_sales + ,date_dim + where ws_sold_date_sk = d_date_sk + and d_year between {YEAR} and {YEAR} + 2) x) + select this_year.channel ty_channel + ,this_year.i_brand_id ty_brand + ,this_year.i_class_id ty_class + ,this_year.i_category_id ty_category + ,this_year.sales ty_sales + ,this_year.number_sales ty_number_sales + ,last_year.channel ly_channel + ,last_year.i_brand_id ly_brand + ,last_year.i_class_id ly_class + ,last_year.i_category_id ly_category + ,last_year.sales ly_sales + ,last_year.number_sales ly_number_sales + from + (select 'store' channel, i_brand_id,i_class_id,i_category_id + ,sum(ss_quantity*ss_list_price) sales, count(*) number_sales + from store_sales + ,item + ,date_dim + where ss_item_sk in (select ss_item_sk from cross_items) + and ss_item_sk = i_item_sk + and ss_sold_date_sk = d_date_sk + and d_week_seq = (select d_week_seq + from date_dim + where d_year = {YEAR} + 1 + and d_moy = 12 + and d_dom = {DAY}) + group by 
i_brand_id,i_class_id,i_category_id + having sum(ss_quantity*ss_list_price) > (select average_sales from avg_sales)) this_year, + (select 'store' channel, i_brand_id,i_class_id + ,i_category_id, sum(ss_quantity*ss_list_price) sales, count(*) number_sales + from store_sales + ,item + ,date_dim + where ss_item_sk in (select ss_item_sk from cross_items) + and ss_item_sk = i_item_sk + and ss_sold_date_sk = d_date_sk + and d_week_seq = (select d_week_seq + from date_dim + where d_year = {YEAR} + and d_moy = 12 + and d_dom = {DAY}) + group by i_brand_id,i_class_id,i_category_id + having sum(ss_quantity*ss_list_price) > (select average_sales from avg_sales)) last_year + where this_year.i_brand_id= last_year.i_brand_id + and this_year.i_class_id = last_year.i_class_id + and this_year.i_category_id = last_year.i_category_id + order by this_year.channel, this_year.i_brand_id, this_year.i_class_id, this_year.i_category_id + limit 100"""], + 'MemSQL': + ["""with cross_items as + (select i_item_sk ss_item_sk + from item, + (select iss.i_brand_id brand_id + ,iss.i_class_id class_id + ,iss.i_category_id category_id + from store_sales + ,item iss + ,date_dim d1 + where ss_item_sk = iss.i_item_sk + and ss_sold_date_sk = d1.d_date_sk + and d1.d_year between {YEAR} AND {YEAR} + 2 + intersect + select ics.i_brand_id + ,ics.i_class_id + ,ics.i_category_id + from catalog_sales + ,item ics + ,date_dim d2 + where cs_item_sk = ics.i_item_sk + and cs_sold_date_sk = d2.d_date_sk + and d2.d_year between {YEAR} AND {YEAR} + 2 + intersect + select iws.i_brand_id + ,iws.i_class_id + ,iws.i_category_id + from web_sales + ,item iws + ,date_dim d3 + where ws_item_sk = iws.i_item_sk + and ws_sold_date_sk = d3.d_date_sk + and d3.d_year between {YEAR} AND {YEAR} + 2) z + where i_brand_id = brand_id + and i_class_id = class_id + and i_category_id = category_id + ), + avg_sales as + (select avg(quantity*list_price) average_sales + from (select ss_quantity quantity + ,ss_list_price list_price + from store_sales + ,date_dim + where ss_sold_date_sk = d_date_sk + and d_year between {YEAR} and {YEAR} + 2 + union all + select cs_quantity quantity + ,cs_list_price list_price + from catalog_sales + ,date_dim + where cs_sold_date_sk = d_date_sk + and d_year between {YEAR} and {YEAR} + 2 + union all + select ws_quantity quantity + ,ws_list_price list_price + from web_sales + ,date_dim + where ws_sold_date_sk = d_date_sk + and d_year between {YEAR} and {YEAR} + 2) x) + select channel, i_brand_id,i_class_id,i_category_id,sum(sales), sum(number_sales) + from( + select 'store' channel, i_brand_id,i_class_id + ,i_category_id,sum(ss_quantity*ss_list_price) sales + , count(*) number_sales + from store_sales + ,item + ,date_dim + where ss_item_sk in (select ss_item_sk from cross_items) + and ss_item_sk = i_item_sk + and ss_sold_date_sk = d_date_sk + and d_year = {YEAR}+2 + and d_moy = 11 + group by i_brand_id,i_class_id,i_category_id + having sum(ss_quantity*ss_list_price) > (select average_sales from avg_sales) + union all + select 'catalog' channel, i_brand_id,i_class_id,i_category_id, sum(cs_quantity*cs_list_price) sales, count(*) number_sales + from catalog_sales + ,item + ,date_dim + where cs_item_sk in (select ss_item_sk from cross_items) + and cs_item_sk = i_item_sk + and cs_sold_date_sk = d_date_sk + and d_year = {YEAR}+2 + and d_moy = 11 + group by i_brand_id,i_class_id,i_category_id + having sum(cs_quantity*cs_list_price) > (select average_sales from avg_sales) + union all + select 'web' channel, i_brand_id,i_class_id,i_category_id, 
sum(ws_quantity*ws_list_price) sales , count(*) number_sales + from web_sales + ,item + ,date_dim + where ws_item_sk in (select ss_item_sk from cross_items) + and ws_item_sk = i_item_sk + and ws_sold_date_sk = d_date_sk + and d_year = {YEAR}+2 + and d_moy = 11 + group by i_brand_id,i_class_id,i_category_id + having sum(ws_quantity*ws_list_price) > (select average_sales from avg_sales) + ) y + group by rollup(channel, i_brand_id,i_class_id,i_category_id) + order by channel,i_brand_id,i_class_id,i_category_id + limit 100""", """ with cross_items as + (select i_item_sk ss_item_sk + from item, + (select iss.i_brand_id brand_id + ,iss.i_class_id class_id + ,iss.i_category_id category_id + from store_sales + ,item iss + ,date_dim d1 + where ss_item_sk = iss.i_item_sk + and ss_sold_date_sk = d1.d_date_sk + and d1.d_year between {YEAR} AND {YEAR} + 2 + intersect + select ics.i_brand_id + ,ics.i_class_id + ,ics.i_category_id + from catalog_sales + ,item ics + ,date_dim d2 + where cs_item_sk = ics.i_item_sk + and cs_sold_date_sk = d2.d_date_sk + and d2.d_year between {YEAR} AND {YEAR} + 2 + intersect + select iws.i_brand_id + ,iws.i_class_id + ,iws.i_category_id + from web_sales + ,item iws + ,date_dim d3 + where ws_item_sk = iws.i_item_sk + and ws_sold_date_sk = d3.d_date_sk + and d3.d_year between {YEAR} AND {YEAR} + 2) x + where i_brand_id = brand_id + and i_class_id = class_id + and i_category_id = category_id + ), + avg_sales as + (select avg(quantity*list_price) average_sales + from (select ss_quantity quantity + ,ss_list_price list_price + from store_sales + ,date_dim + where ss_sold_date_sk = d_date_sk + and d_year between {YEAR} and {YEAR} + 2 + union all + select cs_quantity quantity + ,cs_list_price list_price + from catalog_sales + ,date_dim + where cs_sold_date_sk = d_date_sk + and d_year between {YEAR} and {YEAR} + 2 + union all + select ws_quantity quantity + ,ws_list_price list_price + from web_sales + ,date_dim + where ws_sold_date_sk = d_date_sk + and d_year between {YEAR} and {YEAR} + 2) x) + select this_year.channel ty_channel + ,this_year.i_brand_id ty_brand + ,this_year.i_class_id ty_class + ,this_year.i_category_id ty_category + ,this_year.sales ty_sales + ,this_year.number_sales ty_number_sales + ,last_year.channel ly_channel + ,last_year.i_brand_id ly_brand + ,last_year.i_class_id ly_class + ,last_year.i_category_id ly_category + ,last_year.sales ly_sales + ,last_year.number_sales ly_number_sales + from + (select 'store' channel, i_brand_id,i_class_id,i_category_id + ,sum(ss_quantity*ss_list_price) sales, count(*) number_sales + from store_sales + ,item + ,date_dim + where ss_item_sk in (select ss_item_sk from cross_items) + and ss_item_sk = i_item_sk + and ss_sold_date_sk = d_date_sk + and d_week_seq = (select d_week_seq + from date_dim + where d_year = {YEAR} + 1 + and d_moy = 12 + and d_dom = {DAY}) + group by i_brand_id,i_class_id,i_category_id + having sum(ss_quantity*ss_list_price) > (select average_sales from avg_sales)) this_year, + (select 'store' channel, i_brand_id,i_class_id + ,i_category_id, sum(ss_quantity*ss_list_price) sales, count(*) number_sales + from store_sales + ,item + ,date_dim + where ss_item_sk in (select ss_item_sk from cross_items) + and ss_item_sk = i_item_sk + and ss_sold_date_sk = d_date_sk + and d_week_seq = (select d_week_seq + from date_dim + where d_year = {YEAR} + and d_moy = 12 + and d_dom = {DAY}) + group by i_brand_id,i_class_id,i_category_id + having sum(ss_quantity*ss_list_price) > (select average_sales from avg_sales)) last_year + 
where this_year.i_brand_id= last_year.i_brand_id + and this_year.i_class_id = last_year.i_class_id + and this_year.i_category_id = last_year.i_category_id + order by this_year.channel, this_year.i_brand_id, this_year.i_class_id, this_year.i_category_id + limit 100"""], + }, + 'parameter': + { + 'DAY': { + 'type': "integer", + 'range': [1,28] + }, + 'YEAR': { + 'type': "integer", + 'range': [1998,2000] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q15", + 'query': """ select ca_zip + ,sum(cs_sales_price) as sum_sales_prices + from catalog_sales + ,customer + ,customer_address + ,date_dim + where cs_bill_customer_sk = c_customer_sk + and c_current_addr_sk = ca_address_sk + and ( substr(ca_zip,1,5) in ('85669', '86197','88274','83405','86475', + '85392', '85460', '80348', '81792') + or ca_state in ('CA','WA','GA') + or cs_sales_price > 500) + and cs_sold_date_sk = d_date_sk + and d_qoy = {QOY} and d_year = {YEAR} + group by ca_zip + order by ca_zip is not null, ca_zip + limit 100""", + 'parameter': + { + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + }, + 'QOY': { + 'type': "integer", + 'range': [1,2] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q16", + 'query': """select + count(distinct cs_order_number) as "order count" + ,sum(cs_ext_ship_cost) as "total shipping cost" + ,sum(cs_net_profit) as "total net profit" + from + catalog_sales cs1 + ,date_dim + ,customer_address + ,call_center + where + d_date between '{YEAR}-{MONTH}-01' and + (cast('{YEAR}-{MONTH}-01' as date) + interval '60' day) + and cs1.cs_ship_date_sk = d_date_sk + and cs1.cs_ship_addr_sk = ca_address_sk + and ca_state = '{STATE}' + and cs1.cs_call_center_sk = cc_call_center_sk + and cc_county in ('{COUNTY1}','{COUNTY2}','{COUNTY3}','{COUNTY4}','{COUNTY5}') + and exists (select * + from catalog_sales cs2 + where cs1.cs_order_number = cs2.cs_order_number + and cs1.cs_warehouse_sk <> cs2.cs_warehouse_sk) + and not exists(select * + from catalog_returns cr1 + where cs1.cs_order_number = cr1.cr_order_number) + order by count(distinct cs_order_number) + limit 100""", + 'parameter': + { + 'COUNTY': { + 'type': "list", + 'size': 5, + 'range': ["Ziebach County","Zavala County","Zapata County","Yuma County","Yukon-Koyukuk Census Area","Yuba County","Young County","York County","Yolo County","Yoakum County","Yellowstone County","Yellow Medicine County","Yell County","Yazoo County","Yavapai County","Yates County","Yankton County","Yancey County","Yamhill County","Yalobusha County","Yakutat Borough","Yakima County","Yadkin County","Wythe County","Wyoming County","Wyandotte County","Wyandot County","Wright County","Worth County","Worcester County","Woodward County","Woodson County","Woods County","Woodruff County","Woodford County","Woodbury County","Wood County","Wolfe County","Wise County","Wirt County","Winston County","Winona County","Winneshiek County","Winnebago County","Winn Parish","Winkler County","Windsor County","Windham County","Winchester city","Wilson County","Williamson County","Williamsburg County","Williamsburg city","Williams 
County","Willacy County","Will County","Wilkinson County","Wilkin County","Wilkes County","Wilcox County","Wilbarger County","Wicomico County","Wichita County","Wibaux County","Whitman County","Whitley County","Whitfield County","Whiteside County","White Pine County","White County","Wheeler County","Wheatland County","Whatcom County","Wharton County","Wexford County","Wetzel County","Weston County","Westmoreland County","Westchester County","West Feliciana Parish","West Carroll Parish","West Baton Rouge Parish","Wells County","Weld County","Webster Parish","Webster County","Weber County","Webb County","Weakley County","Waynesboro city","Wayne County","Waushara County","Waupaca County","Waukesha County","Watonwan County","Watauga County","Washtenaw County","Washoe County","Washita County","Washington Parish","Washington County","Washburn County","Washakie County","Waseca County","Wasco County","Wasatch County","Warrick County","Warren County","Ware County","Ward County","Wapello County","Walworth County","Walton County","Walthall County","Walsh County","Wallowa County","Waller County","Wallace County","Walla Walla County","Walker County","Waldo County","Wakulla County","Wake County","Wahkiakum County","Wagoner County","Wadena County","Wade Hampton Census Area","Wabaunsee County","Wabasha County","Wabash County","Volusia County","Virginia Beach city","Vinton County","Vilas County","Vigo County","Victoria County","Vernon Parish","Vernon County","Vermillion County","Vermilion Parish","Vermilion County","Ventura County","Venango County","Vanderburgh County","Vance County","Van Zandt County","Van Wert County","Van Buren County","Valley County","Valencia County","Valdez-Cordova Census Area","Val Verde County","Uvalde County","Utah County","Upton County","Upson County","Upshur County","Union Parish","Union County","Unicoi County","Umatilla County","Ulster County","Uintah County","Uinta County","Tyrrell County","Tyler County","Twin Falls County","Twiggs County","Tuscola County","Tuscarawas County","Tuscaloosa County","Turner County","Tuolumne County","Tunica County","Tulsa County","Tulare County","Tucker County","Trumbull County","Trousdale County","Troup County","Tripp County","Trinity County","Trimble County","Trigg County","Treutlen County","Trempealeau County","Trego County","Treasure County","Travis County","Traverse County","Transylvania County","Traill County","Towns County","Towner County","Torrance County","Toombs County","Toole County","Tooele County","Tompkins County","Tom Green County","Tolland County","Todd County","Titus County","Tishomingo County","Tipton County","Tippecanoe County","Tippah County","Tioga County","Tillman County","Tillamook County","Tift County","Thurston County","Throckmorton County","Thomas County","Thayer County","Texas County","Teton County","Terry County","Terrell County","Terrebonne Parish","Tensas Parish","Teller County","Telfair County","Tehama County","Tazewell County","Taylor County","Tattnall County","Tate County","Tarrant County","Taos County","Tangipahoa Parish","Taney County","Tama County","Tallapoosa County","Tallahatchie County","Talladega County","Taliaferro County","Talbot County","Switzerland County","Swisher County","Swift County","Sweetwater County","Sweet Grass County","Swain County","Suwannee County","Sutton County","Sutter County","Sussex County","Susquehanna County","Surry County","Sunflower County","Sumter County","Sumner County","Summit County","Summers County","Sully County","Sullivan County","Suffolk County","Suffolk city","Sublette 
County","Stutsman County","Strafford County","Story County","Storey County","Stonewall County","Stone County","Stokes County","Stoddard County","Stillwater County","Stewart County","Stevens County","Steuben County","Sterling County","Stephenson County","Stephens County","Steele County","Stearns County","Staunton city","Starr County","Starke County","Stark County","Stanton County","Stanly County","Stanley County","Stanislaus County","Stafford County","Spotsylvania County","Spokane County","Spink County","Spencer County","Spartanburg County","Spalding County","Southampton County","Sonoma County","Somervell County","Somerset County","Solano County","Socorro County","Snyder County","Snohomish County","Smyth County","Smith County","Slope County","Skamania County","Skagit County","Sitka Borough","Siskiyou County","Sioux County","Simpson County","Silver Bow County","Sierra County","Sibley County","Shoshone County","Shiawassee County","Sherman County","Sheridan County","Sherburne County","Shenandoah County","Shelby County","Sheboygan County","Shawnee County","Shawano County","Shasta County","Sharp County","Sharkey County","Shannon County","Shackelford County","Seward County","Sevier County","Sequoyah County","Sequatchie County","Seneca County","Seminole County","Sedgwick County","Sebastian County","Searcy County","Scurry County","Screven County","Scotts Bluff County","Scott County","Scotland County","Scioto County","Schuylkill County","Schuyler County","Schoolcraft County","Schoharie County","Schley County","Schleicher County","Schenectady County","Sawyer County","Saunders County","Sauk County","Sarpy County","Sargent County","Saratoga County","Sarasota County","Santa Rosa County","Santa Fe County","Santa Cruz County","Santa Clara County","Santa Barbara County","Sanpete County","Sanilac County","Sangamon County","Sandusky County","Sandoval County","Sanders County","Sanborn County","San Saba County","San Patricio County","San Miguel County","San Mateo County","San Luis Obispo County","San Juan County","San Joaquin County","San Jacinto County","San Francisco County","San Diego County","San Bernardino County","San Benito County","San Augustine County","Sampson County","Saluda County","Salt Lake County","Saline County","Salem County","Salem city","Saguache County","Saginaw County","Sagadahoc County","Sacramento County","Sac County","Sabine Parish","Sabine County","Rutland County","Rutherford County","Russell County","Rusk County","Rush County","Runnels County","Rowan County","Routt County","Ross County","Rosebud County","Roseau County","Roscommon County","Roosevelt County","Rooks County","Rolette County","Rogers County","Roger Mills County","Rockwall County","Rockland County","Rockingham County","Rockdale County","Rockcastle County","Rockbridge County","Rock Island County","Rock County","Robeson County","Robertson County","Roberts County","Roanoke County","Roanoke city","Roane County","Riverside County","Ritchie County","Ripley County","Rio Grande County","Rio Blanco County","Rio Arriba County","Ringgold County","Riley County","Richmond County","Richmond city","Richland Parish","Richland County","Richardson County","Rich County","Rice County","Rhea County","Reynolds County","Republic County","Renville County","Rensselaer County","Reno County","Refugio County","Reeves County","Redwood County","Red Willow County","Red River Parish","Red River County","Red Lake County","Real County","Reagan County","Ray County","Rawlins County","Ravalli County","Rappahannock County","Rapides Parish","Ransom 
County","Rankin County","Randolph County","Randall County","Ramsey County","Ralls County","Raleigh County","Rains County","Radford city","Racine County","Rabun County","Quitman County","Queens County","Queen Anne County","Quay County","Putnam County","Pushmataha County","Pulaski County","Pueblo County","Prowers County","Providence County","Prince William County","Prince George County","Prince Edward County","Price County","Preston County","Presque Isle County","Presidio County","Prentiss County","Preble County","Pratt County","Prairie County","Powhatan County","Poweshiek County","Power County","Powell County","Powder River County","Potter County","Pottawattamie County","Pottawatomie County","Posey County","Portsmouth city","Porter County","Portage County","Poquoson city","Pope County","Pontotoc County","Pondera County","Polk County","Pointe Coupee Parish","Poinsett County","Pocahontas County","Plymouth County","Plumas County","Pleasants County","Platte County","Plaquemines Parish","Placer County","Piute County","Pittsylvania County","Pittsburg County","Pitt County","Pitkin County","Piscataquis County","Pipestone County","Pinellas County","Pine County","Pinal County","Pima County","Pike County","Pierce County","Pickett County","Pickens County","Pickaway County","Piatt County","Phillips County","Philadelphia County","Phelps County","Pettis County","Petroleum County","Petersburg city","Person County","Pershing County","Perry County","Perquimans County","Perkins County","Pepin County","Peoria County","Penobscot County","Pennington County","Pendleton County","Pender County","Pend Oreille County","Pemiscot County","Pembina County","Pecos County","Pearl River County","Peach County","Payne County","Payette County","Pawnee County","Paulding County","Patrick County","Passaic County","Pasquotank County","Pasco County","Parmer County","Parker County","Parke County","Park County","Panola County","Pamlico County","Palo Pinto County","Palo Alto County","Palm Beach County","Page County","Pacific County","Ozaukee County","Ozark County","Oxford County","Owyhee County","Owsley County","Owen County","Overton County","Outagamie County","Ouray County","Ouachita Parish","Ouachita County","Otter Tail County","Ottawa County","Otsego County","Otoe County","Otero County","Oswego County","Oscoda County","Osceola County","Osborne County","Osage County","Orleans Parish","Orleans County","Oregon County","Orangeburg County","Orange County","Ontonagon County","Ontario County","Onslow County","Onondaga County","Oneida County","Olmsted County","Oliver County","Oldham County","Oktibbeha County","Okmulgee County","Oklahoma County","Okfuskee County","Okeechobee County","Okanogan County","Okaloosa County","Ohio County","Oglethorpe County","Ogle County","Ogemaw County","Oconto County","Oconee County","Ochiltree County","Oceana County","Ocean County","Obion County","Oakland County","O-Brien County","Nye County","Nueces County","Nuckolls County","Noxubee County","Nowata County","Nottoway County","Norton County","Norton city","Northwest Arctic Borough","Northumberland County","Northampton County","North Slope Borough","Norman County","Norfolk County","Norfolk city","Nome Census Area","Nolan County","Nodaway County","Nobles County","Noble County","Niobrara County","Nicollet County","Nicholas County","Niagara County","Nez Perce County","Newton County","Newport News city","Newport County","Newberry County","Newaygo County","New York County","New Madrid County","New London County","New Kent County","New Haven County","New Hanover 
County","New Castle County","Nevada County","Ness County","Neshoba County","Neosho County","Nemaha County","Nelson County","Navarro County","Navajo County","Natrona County","Natchitoches Parish","Nassau County","Nash County","Napa County","Nantucket County","Nance County","Nacogdoches County","Musselshell County","Muskogee County","Muskingum County","Muskegon County","Muscogee County","Muscatine County","Murray County","Multnomah County","Muhlenberg County","Mower County","Mountrail County","Moultrie County","Motley County","Morton County","Morrow County","Morrison County","Morris County","Morrill County","Morgan County","Morehouse Parish","Mora County","Moore County","Moody County","Montrose County","Montour County","Montmorency County","Montgomery County","Montezuma County","Monterey County","Montcalm County","Montague County","Monroe County","Monongalia County","Monona County","Mono County","Monmouth County","Moniteau County","Mohave County","Moffat County","Modoc County","Mobile County","Mitchell County","Missoula County","Mississippi County","Missaukee County","Minnehaha County","Minidoka County","Mingo County","Mineral County","Miner County","Milwaukee County","Mills County","Miller County","Mille Lacs County","Millard County","Milam County","Mifflin County","Midland County","Middlesex County","Miami County","Metcalfe County","Mesa County","Merrimack County","Merrick County","Meriwether County","Mercer County","Merced County","Menominee County","Menifee County","Mendocino County","Menard County","Mellette County","Meigs County","Meeker County","Medina County","Mecosta County","Mecklenburg County","Meagher County","Meade County","McPherson County","McNairy County","McMullen County","McMinn County","McLeod County","McLennan County","McLean County","McKinley County","McKenzie County","McKean County","McIntosh County","McHenry County","McDuffie County","McDowell County","McDonough County","McDonald County","McCurtain County","McCulloch County","McCreary County","McCracken County","McCormick County","McCook County","McCone County","McClain County","Mayes County","Maverick County","Maury County","Maui County","Mathews County","Matanuska-Susitna Borough","Matagorda County","Massac County","Mason County","Martinsville city","Martin County","Marshall County","Marquette County","Marlboro County","Mariposa County","Marion County","Marinette County","Marin County","Maries County","Maricopa County","Marengo County","Marathon County","Manitowoc County","Manistee County","Manatee County","Manassas Park city","Manassas city","Malheur County","Major County","Mahoning County","Mahnomen County","Mahaska County","Magoffin County","Madison Parish","Madison County","Madera County","Macoupin County","Macon County","Macomb County","Mackinac County","Lyon County","Lynn County","Lynchburg city","Lyman County","Lycoming County","Luzerne County","Lunenburg County","Luna County","Lumpkin County","Luce County","Lucas County","Lubbock County","Lowndes County","Loving County","Love County","Loup County","Louisa County","Loudoun County","Loudon County","Los Angeles County","Los Alamos County","Lorain County","Lonoke County","Long County","Logan County","Llano County","Livingston Parish","Livingston County","Live Oak County","Little River County","Litchfield County","Lipscomb County","Linn County","Lincoln Parish","Lincoln County","Limestone County","Licking County","Liberty County","Lexington County","Lexington city","Lewis County","Lewis and Clark County","Levy County","Letcher County","Leslie County","Leon 
County","Lenoir County","Lenawee County","Lemhi County","Lehigh County","Leflore County","Leelanau County","Lee County","Lebanon County","Leavenworth County","Leake County","Lea County","Le Sueur County","Le Flore County","Lawrence County","Lavaca County","Laurens County","Laurel County","Lauderdale County","Latimer County","Latah County","Lassen County","Las Animas County","Larue County","Larimer County","Laramie County","Lapeer County","Lanier County","Langlade County","Lane County","Lander County","Lancaster County","Lampasas County","LaMoure County","Lamoille County","Lamb County","Lamar County","Lake of the Woods County","Lake County","Lake and Peninsula Borough","Lagrange County","Lafourche Parish","Lafayette Parish","Lafayette County","Laclede County","Lackawanna County","Lac qui Parle County","Labette County","La Salle Parish","La Salle County","La Porte County","La Plata County","La Paz County","La Crosse County","Kossuth County","Kosciusko County","Kootenai County","Koochiching County","Kodiak Island Borough","Knox County","Knott County","Klickitat County","Kleberg County","Klamath County","Kittson County","Kittitas County","Kitsap County","Kit Carson County","Kiowa County","Kinney County","Kingsbury County","Kings County","Kingman County","Kingfisher County","King William County","King George County","King County","King and Queen County","Kimble County","Kimball County","Kidder County","Keya Paha County","Keweenaw County","Kewaunee County","Ketchikan Gateway Borough","Kershaw County","Kerr County","Kern County","Keokuk County","Kenton County","Kent County","Kenosha County","Kennebec County","Kenedy County","Kendall County","Kenai Peninsula Borough","Kemper County","Keith County","Kearny County","Kearney County","Kay County","Kaufman County","Kauai County","Karnes County","Kankakee County","Kane County","Kandiyohi County","Kanawha County","Kanabec County","Kalkaska County","Kalamazoo County","Juniata County","Juneau County","Juneau Borough","Judith Basin County","Juab County","Josephine County","Jones County","Johnston County","Johnson County","Jo Daviess County","Jim Wells County","Jim Hogg County","Jewell County","Jessamine County","Jersey County","Jerome County","Jerauld County","Jennings County","Jenkins County","Jefferson Parish","Jefferson Davis Parish","Jefferson Davis County","Jefferson County","Jeff Davis County","Jay County","Jasper County","James City County","Jackson Parish","Jackson County","Jack County","Izard County","Itawamba County","Itasca County","Issaquena County","Isle of Wight County","Island County","Isanti County","Isabella County","Irwin County","Iroquois County","Iron County","Irion County","Iredell County","Iowa County","Iosco County","Ionia County","Inyo County","Ingham County","Indiana County","Indian River County","Independence County","Imperial County","Idaho County","Ida County","Iberville Parish","Iberia Parish","Hyde County","Hutchinson County","Huron County","Huntington County","Huntingdon County","Hunterdon County","Hunt County","Humphreys County","Humboldt County","Hughes County","Huerfano County","Hudspeth County","Hudson County","Hubbard County","Howell County","Howard County","Houston County","Houghton County","Hot Springs County","Hot Spring County","Horry County","Hopkins County","Hopewell city","Hooker County","Hood River County","Hood County","Honolulu County","Holt County","Holmes County","Hoke County","Hodgeman County","Hockley County","Hocking County","Hitchcock County","Hinsdale County","Hinds County","Hillsdale 
County","Hillsborough County","Hill County","Highlands County","Highland County","Hidalgo County","Hickory County","Hickman County","Hettinger County","Hertford County","Hernando County","Herkimer County","Henry County","Henrico County","Hennepin County","Hendry County","Hendricks County","Henderson County","Hempstead County","Hemphill County","Heard County","Haywood County","Hays County","Hayes County","Hawkins County","Hawaii County","Haskell County","Harvey County","Hartley County","Hartford County","Hart County","Harrisonburg city","Harrison County","Harris County","Harper County","Harney County","Harnett County","Harmon County","Harlan County","Harford County","Hardy County","Harding County","Hardin County","Hardeman County","Hardee County","Haralson County","Hanson County","Hansford County","Hanover County","Hand County","Hancock County","Hampton County","Hampton city","Hampshire County","Hampden County","Hamlin County","Hamilton County","Hamblen County","Hall County","Halifax County","Hale County","Haines Borough","Habersham County","Haakon County","Gwinnett County","Guthrie County","Gunnison County","Gulf County","Guilford County","Guernsey County","Guadalupe County","Grundy County","Grimes County","Griggs County","Grenada County","Gregory County","Gregg County","Greer County","Greenwood County","Greenville County","Greenup County","Greensville County","Greenlee County","Greene County","Greenbrier County","Green Lake County","Green County","Greeley County","Grayson County","Grays Harbor County","Gray County","Graves County","Gratiot County","Granville County","Grant Parish","Grant County","Granite County","Grand Traverse County","Grand Isle County","Grand Forks County","Grand County","Grainger County","Graham County","Grafton County","Grady County","Gove County","Gosper County","Goshen County","Gordon County","Gooding County","Goodhue County","Goochland County","Gonzales County","Goliad County","Golden Valley County","Gogebic County","Glynn County","Gloucester County","Glenn County","Glasscock County","Glascock County","Gladwin County","Glades County","Glacier County","Gilpin County","Gilmer County","Gilliam County","Gillespie County","Giles County","Gilchrist County","Gila County","Gibson County","Georgetown County","George County","Gentry County","Geneva County","Genesee County","Gem County","Geauga County","Geary County","Gates County","Gaston County","Gasconade County","Garza County","Garvin County","Garrett County","Garrard County","Garland County","Garfield County","Garden County","Galveston County","Gallia County","Gallatin County","Galax city","Gaines County","Gage County","Gadsden County","Furnas County","Fulton County","Frontier County","Frio County","Fresno County","Fremont County","Freestone County","Freeborn County","Fredericksburg city","Frederick County","Franklin Parish","Franklin County","Franklin city","Fountain County","Foster County","Fort Bend County","Forsyth County","Forrest County","Forest County","Ford County","Fond du Lac County","Foard County","Fluvanna County","Floyd County","Florence County","Fleming County","Flathead County","Flagler County","Fisher County","Finney County","Fillmore County","Ferry County","Fergus County","Fentress County","Fayette County","Fauquier County","Faulkner County","Faulk County","Faribault County","Fannin County","Falls County","Falls Church city","Fallon County","Fall River County","Fairfield County","Fairfax County","Fairfax city","Fairbanks North Star Borough","Evans County","Evangeline Parish","Eureka County","Etowah 
County","Estill County","Essex County","Esmeralda County","Escambia County","Erie County","Erath County","Emporia city","Emmons County","Emmet County","Emery County","Emanuel County","Elmore County","Ellsworth County","Ellis County","Elliott County","Elko County","Elkhart County","Elk County","Elbert County","El Paso County","El Dorado County","Effingham County","Edwards County","Edmunds County","Edmonson County","Edgefield County","Edgecombe County","Edgar County","Eddy County","Ector County","Echols County","Eau Claire County","Eaton County","Eastland County","East Feliciana Parish","East Carroll Parish","East Baton Rouge Parish","Early County","Eagle County","Dyer County","Duval County","Dutchess County","Durham County","Duplin County","DuPage County","Dunn County","Dunklin County","Dundy County","Dukes County","Duchesne County","Dubuque County","Dubois County","Drew County","Douglas County","Dougherty County","Dorchester County","Door County","Dooly County","Donley County","Doniphan County","Dona Ana County","Dolores County","Dodge County","Doddridge County","Dixon County","Dixie County","Divide County","District of Columbia","Dinwiddie County","Dimmit County","Dillon County","Dillingham Census Area","Dickson County","Dickinson County","Dickey County","Dickenson County","Dickens County","DeWitt County","Dewey County","Deuel County","DeSoto County","Desha County","Deschutes County","Des Moines County","Denver County","Denton County","Dent County","Denali Borough","Delta County","Delaware County","Del Norte County","DeKalb County","Defiance County","Deer Lodge County","Decatur County","DeBaca County","Dearborn County","Deaf Smith County","De Witt County","De Soto Parish","De Kalb County","Day County","Dawson County","Dawes County","Davison County","Davis County","Daviess County","Davie County","Davidson County","Dauphin County","Darlington County","Darke County","Dare County","Danville city","Daniels County","Dane County","Dallas County","Dallam County","Dale County","Dakota County","Daggett County","Dade County","Cuyahoga County","Custer County","Curry County","Currituck County","Cuming County","Cumberland County","Culpeper County","Cullman County","Culberson County","Crowley County","Crow Wing County","Cross County","Crosby County","Crook County","Crockett County","Crittenden County","Crisp County","Crenshaw County","Creek County","Crawford County","Craven County","Crane County","Craighead County","Craig County","Cowlitz County","Cowley County","Coweta County","Covington County","Covington city","Cottonwood County","Cotton County","Cottle County","Costilla County","Coshocton County","Coryell County","Cortland County","Corson County","Copiah County","Coosa County","Coos County","Cooper County","Cooke County","Cook County","Conway County","Converse County","Contra Costa County","Conejos County","Conecuh County","Concordia Parish","Concho County","Comanche County","Comal County","Colusa County","Columbus County","Columbiana County","Columbia County","Colquitt County","Colorado County","Colonial Heights city","Collingsworth County","Collin County","Collier County","Colleton County","Colfax County","Coles County","Coleman County","Cole County","Colbert County","Coke County","Coffey County","Coffee County","Codington County","Coconino County","Cocke County","Cochran County","Cochise County","Cobb County","Coal County","Coahoma County","Cloud County","Clinton County","Clinch County","Clifton Forge city","Cleveland County","Clermont County","Cleburne County","Clearwater County","Clearfield 
County","Clear Creek County","Clayton County","Clay County","Clatsop County","Clarke County","Clark County","Clarion County","Clarendon County","Clare County","Clallam County","Claiborne Parish","Claiborne County","Clackamas County","Citrus County","Cimarron County","Cibola County","Churchill County","Christian County","Chowan County","Chouteau County","Choctaw County","Chittenden County","Chisago County","Chippewa County","Chilton County","Childress County","Chicot County","Chickasaw County","Cheyenne County","Chesterfield County","Chester County","Cheshire County","Chesapeake city","Cherry County","Cherokee County","Chenango County","Chemung County","Chelan County","Cheboygan County","Cheatham County","Chaves County","Chautauqua County","Chattooga County","Chattahoochee County","Chatham County","Chase County","Charlton County","Charlottesville city","Charlotte County","Charlevoix County","Charleston County","Charles Mix County","Charles County","Charles City County","Chariton County","Champaign County","Chambers County","Chaffee County","Cerro Gordo County","Centre County","Cedar County","Cecil County","Cayuga County","Cavalier County","Cattaraugus County","Catron County","Catoosa County","Catawba County","Catahoula Parish","Caswell County","Castro County","Cassia County","Cass County","Casey County","Cascade County","Carver County","Carteret County","Carter County","Carson County","Carson City","Carroll County","Caroline County","Carlton County","Carlisle County","Caribou County","Carbon County","Cape May County","Cape Girardeau County","Canyon County","Cannon County","Candler County","Canadian County","Campbell County","Camp County","Cameron Parish","Cameron County","Camden County","Cambria County","Camas County","Calvert County","Calumet County","Calloway County","Callaway County","Callahan County","Calhoun County","Caledonia County","Caldwell Parish","Caldwell County","Calcasieu Parish","Calaveras County","Caddo Parish","Caddo County","Cache County","Cabell County","Cabarrus County","Butts County","Butte County","Butler County","Burt County","Burnett County","Burnet County","Burlington County","Burleson County","Burleigh County","Burke County","Bureau County","Buncombe County","Bullock County","Bulloch County","Bullitt County","Buffalo County","Buena Vista County","Buena Vista city","Bucks County","Buckingham County","Buchanan County","Bryan County","Brunswick County","Brule County","Brown County","Broward County","Broome County","Brooks County","Brookings County","Brooke County","Bronx County","Broadwater County","Bristol County","Bristol city","Bristol Bay Borough","Briscoe County","Brewster County","Brevard County","Bremer County","Breckinridge County","Breathitt County","Brazos County","Brazoria County","Braxton County","Brantley County","Branch County","Bradley County","Bradford County","Bracken County","Boyle County","Boyd County","Box Elder County","Box Butte County","Bowman County","Bowie County","Bourbon County","Boundary County","Boulder County","Bottineau County","Botetourt County","Bossier Parish","Bosque County","Borden County","Boone County","Bonneville County","Bonner County","Bond County","Bon Homme County","Bollinger County","Bolivar County","Boise County","Blue Earth County","Blount County","Bledsoe County","Bleckley County","Bland County","Blanco County","Blair County","Blaine County","Bladen County","Blackford County","Black Hawk County","Bingham County","Billings County","Big Stone County","Big Horn County","Bienville Parish","Bibb County","Bexar County","Bethel 
Census Area","Bertie County","Berrien County","Bernalillo County","Berkshire County","Berks County","Berkeley County","Bergen County","Benzie County","Benton County","Bent County","Benson County","Bennington County","Bennett County","Benewah County","Ben Hill County","Beltrami County","Belmont County","Bell County","Belknap County","Bee County","Bedford County","Bedford city","Beckham County","Becker County","Beaverhead County","Beaver County","Beauregard Parish","Beaufort County","Bear Lake County","Beadle County","Baylor County","Bayfield County","Bay County","Baxter County","Bath County","Bates County","Bastrop County","Bartow County","Barton County","Bartholomew County","Barry County","Barrow County","Barron County","Barren County","Barnwell County","Barnstable County","Barnes County","Barbour County","Barber County","Baraga County","Bannock County","Banner County","Banks County","Bandera County","Bamberg County","Baltimore County","Baltimore city","Ballard County","Baldwin County","Baker County","Bailey County","Bacon County","Baca County","Avoyelles Parish","Avery County","Autauga County","Austin County","Aurora County","Augusta County","Auglaize County","Audubon County","Audrain County","Attala County","Atoka County","Atlantic County","Atkinson County","Athens County","Atchison County","Atascosa County","Assumption Parish","Asotin County","Ashtabula County","Ashley County","Ashland County","Ashe County","Ascension Parish","Arthur County","Aroostook County","Armstrong County","Arlington County","Arkansas County","Arenac County","Archuleta County","Archer County","Arapahoe County","Aransas County","Appomattox County","Appling County","Appanoose County","Apache County","Antrim County","Antelope County","Anson County","Anoka County","Anne Arundel County","Angelina County","Androscoggin County","Andrews County","Andrew County","Anderson County","Anchorage Borough","Amite County","Amherst County","Amelia County","Amador County","Alpine County","Alpena County","Allendale County","Allen Parish","Allen County","Allegheny County","Alleghany County","Allegany County","Allegan County","Allamakee County","Alger County","Alfalfa County","Alexandria city","Alexander County","Aleutians West Census Area","Aleutians East Borough","Alcorn County","Alcona County","Albemarle County","Albany County","Alamosa County","Alameda County","Alamance County","Alachua County","Aitkin County","Aiken County","Addison County","Adams County","Adair County","Ada County","Accomack County","Acadia Parish","Abbeville County"] + }, + 'MONTH': { + 'type': "integer", + 'range': [2,5] + }, + 'STATE': { + 'type': "list", + 'range': ["AK","AL","AR","AZ","CA","CO","CT","DE","FL","GA","HI","IA","ID","IL","IN","KS","KY","LA","ME","MI","MN","MO","MS","MT","NC","ND","NE","NJ","NM","NV","OH","OK","OR","PA","SC","SD","TN","TX","UT","VA","WA","WI","WV"] + }, + 'YEAR': { + 'type': "integer", + 'range': [1999,2002] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q17", + 'query': """ select i_item_id + ,i_item_desc + ,s_state + ,count(ss_quantity) as store_sales_quantitycount + ,avg(ss_quantity) as store_sales_quantityave + ,stddev_samp(ss_quantity) as store_sales_quantitystdev + ,stddev_samp(ss_quantity)/avg(ss_quantity) as store_sales_quantitycov + ,count(sr_return_quantity) as 
store_returns_quantitycount + ,avg(sr_return_quantity) as store_returns_quantityave + ,stddev_samp(sr_return_quantity) as store_returns_quantitystdev + ,stddev_samp(sr_return_quantity)/avg(sr_return_quantity) as store_returns_quantitycov + ,count(cs_quantity) as catalog_sales_quantitycount ,avg(cs_quantity) as catalog_sales_quantityave + ,stddev_samp(cs_quantity) as catalog_sales_quantitystdev + ,stddev_samp(cs_quantity)/avg(cs_quantity) as catalog_sales_quantitycov + from store_sales + ,store_returns + ,catalog_sales + ,date_dim d1 + ,date_dim d2 + ,date_dim d3 + ,store + ,item + where d1.d_quarter_name = '{YEAR}Q1' + and d1.d_date_sk = ss_sold_date_sk + and i_item_sk = ss_item_sk + and s_store_sk = ss_store_sk + and ss_customer_sk = sr_customer_sk + and ss_item_sk = sr_item_sk + and ss_ticket_number = sr_ticket_number + and sr_returned_date_sk = d2.d_date_sk + and d2.d_quarter_name in ('{YEAR}Q1','{YEAR}Q2','{YEAR}Q3') + and sr_customer_sk = cs_bill_customer_sk + and sr_item_sk = cs_item_sk + and cs_sold_date_sk = d3.d_date_sk + and d3.d_quarter_name in ('{YEAR}Q1','{YEAR}Q2','{YEAR}Q3') + group by i_item_id + ,i_item_desc + ,s_state + order by i_item_id + ,i_item_desc + ,s_state + limit 100""", + 'parameter': + { + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q18", + 'query': """select i_item_id, + ca_country, + ca_state, + ca_county, + avg( cast(cs_quantity as decimal(12,2))) agg1, + avg( cast(cs_list_price as decimal(12,2))) agg2, + avg( cast(cs_coupon_amt as decimal(12,2))) agg3, + avg( cast(cs_sales_price as decimal(12,2))) agg4, + avg( cast(cs_net_profit as decimal(12,2))) agg5, + avg( cast(c_birth_year as decimal(12,2))) agg6, + avg( cast(cd1.cd_dep_count as decimal(12,2))) agg7 + from catalog_sales, customer_demographics cd1, + customer_demographics cd2, customer, customer_address, date_dim, item + where cs_sold_date_sk = d_date_sk and + cs_item_sk = i_item_sk and + cs_bill_cdemo_sk = cd1.cd_demo_sk and + cs_bill_customer_sk = c_customer_sk and + cd1.cd_gender = '{GEN}' and + cd1.cd_education_status = '{ES}' and + c_current_cdemo_sk = cd2.cd_demo_sk and + c_current_addr_sk = ca_address_sk and + c_birth_month in ({MONTH1},{MONTH2},{MONTH3},{MONTH4},{MONTH5},{MONTH6}) and + d_year = {YEAR} and + ca_state in ('{STATE1}','{STATE2}','{STATE3}','{STATE4}','{STATE5}','{STATE6}','{STATE7}') + group by i_item_id, ca_country, ca_state, ca_county with rollup + order by ca_country is not null, ca_country, + ca_state is not null, ca_state, + ca_county is not null, ca_county, + i_item_id is not null, i_item_id + limit 100""", + 'DBMS': { + 'PostgreSQL': """select i_item_id, + ca_country, + ca_state, + ca_county, + avg( cast(cs_quantity as decimal(12,2))) agg1, + avg( cast(cs_list_price as decimal(12,2))) agg2, + avg( cast(cs_coupon_amt as decimal(12,2))) agg3, + avg( cast(cs_sales_price as decimal(12,2))) agg4, + avg( cast(cs_net_profit as decimal(12,2))) agg5, + avg( cast(c_birth_year as decimal(12,2))) agg6, + avg( cast(cd1.cd_dep_count as decimal(12,2))) agg7 + from catalog_sales, customer_demographics cd1, + customer_demographics cd2, customer, customer_address, date_dim, item + where cs_sold_date_sk = d_date_sk and + cs_item_sk = i_item_sk and + cs_bill_cdemo_sk = cd1.cd_demo_sk and + 
cs_bill_customer_sk = c_customer_sk and + cd1.cd_gender = '{GEN}' and + cd1.cd_education_status = '{ES}' and + c_current_cdemo_sk = cd2.cd_demo_sk and + c_current_addr_sk = ca_address_sk and + c_birth_month in ({MONTH1},{MONTH2},{MONTH3},{MONTH4},{MONTH5},{MONTH6}) and + d_year = {YEAR} and + ca_state in ('{STATE1}','{STATE2}','{STATE3}','{STATE4}','{STATE5}','{STATE6}','{STATE7}') + group by rollup(i_item_id, ca_country, ca_state, ca_county) + order by ca_country is not null, ca_country, + ca_state is not null, ca_state, + ca_county is not null, ca_county, + i_item_id is not null, i_item_id + limit 100""", + 'MonetDB': """select i_item_id, + ca_country, + ca_state, + ca_county, + avg( cast(cs_quantity as decimal(12,2))) agg1, + avg( cast(cs_list_price as decimal(12,2))) agg2, + avg( cast(cs_coupon_amt as decimal(12,2))) agg3, + avg( cast(cs_sales_price as decimal(12,2))) agg4, + avg( cast(cs_net_profit as decimal(12,2))) agg5, + avg( cast(c_birth_year as decimal(12,2))) agg6, + avg( cast(cd1.cd_dep_count as decimal(12,2))) agg7 + from catalog_sales, customer_demographics cd1, + customer_demographics cd2, customer, customer_address, date_dim, item + where cs_sold_date_sk = d_date_sk and + cs_item_sk = i_item_sk and + cs_bill_cdemo_sk = cd1.cd_demo_sk and + cs_bill_customer_sk = c_customer_sk and + cd1.cd_gender = '{GEN}' and + cd1.cd_education_status = '{ES}' and + c_current_cdemo_sk = cd2.cd_demo_sk and + c_current_addr_sk = ca_address_sk and + c_birth_month in ({MONTH1},{MONTH2},{MONTH3},{MONTH4},{MONTH5},{MONTH6}) and + d_year = {YEAR} and + ca_state in ('{STATE1}','{STATE2}','{STATE3}','{STATE4}','{STATE5}','{STATE6}','{STATE7}') + group by rollup(i_item_id, ca_country, ca_state, ca_county) + order by ca_country is not null, ca_country, + ca_state is not null, ca_state, + ca_county is not null, ca_county, + i_item_id is not null, i_item_id + limit 100""", + 'MariaDB': """with total as (select i_item_id, + ca_country, + ca_state, + ca_county, + avg( cast(cs_quantity as decimal(12,2))) agg1, + avg( cast(cs_list_price as decimal(12,2))) agg2, + avg( cast(cs_coupon_amt as decimal(12,2))) agg3, + avg( cast(cs_sales_price as decimal(12,2))) agg4, + avg( cast(cs_net_profit as decimal(12,2))) agg5, + avg( cast(c_birth_year as decimal(12,2))) agg6, + avg( cast(cd1.cd_dep_count as decimal(12,2))) agg7 + from catalog_sales, customer_demographics cd1, + customer_demographics cd2, customer, customer_address, date_dim, item + where cs_sold_date_sk = d_date_sk and + cs_item_sk = i_item_sk and + cs_bill_cdemo_sk = cd1.cd_demo_sk and + cs_bill_customer_sk = c_customer_sk and + cd1.cd_gender = '{GEN}' and + cd1.cd_education_status = '{ES}' and + c_current_cdemo_sk = cd2.cd_demo_sk and + c_current_addr_sk = ca_address_sk and + c_birth_month in ({MONTH1},{MONTH2},{MONTH3},{MONTH4},{MONTH5},{MONTH6}) and + d_year = {YEAR} and + ca_state in ('{STATE1}','{STATE2}','{STATE3}','{STATE4}','{STATE5}','{STATE6}','{STATE7}') + group by i_item_id, ca_country, ca_state, ca_county with rollup) + select * from total + order by ca_country is not null, ca_country, + ca_state is not null, ca_state, + ca_county is not null, ca_county, + i_item_id is not null, i_item_id + limit 100""" + }, + 'parameter': + { + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + }, + 'STATE': { + 'type': "list", + 'size': 7, + 'range': 
["AK","AL","AR","AZ","CA","CO","CT","DE","FL","GA","HI","IA","ID","IL","IN","KS","KY","LA","ME","MI","MN","MO","MS","MT","NC","ND","NE","NJ","NM","NV","OH","OK","OR","PA","SC","SD","TN","TX","UT","VA","WA","WI","WV"] + }, + 'MONTH': { + 'type': "integer", + 'size': 6, + 'range': [1,12] + }, + 'GEN': { + 'type': "list", + 'range': ["M","F"] + }, + 'ES': { + 'type': "list", + 'range': ["Primary","Secondary","College","2 yr Degree","4 yr Degree", "Advanced Degree","Unknown"] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q19", + 'query': """select i_brand_id brand_id, i_brand brand, i_manufact_id, i_manufact, + sum(ss_ext_sales_price) ext_price + from date_dim, store_sales, item,customer,customer_address,store + where d_date_sk = ss_sold_date_sk + and ss_item_sk = i_item_sk + and i_manager_id={MANAGER} + and d_moy={MONTH} + and d_year={YEAR} + and ss_customer_sk = c_customer_sk + and c_current_addr_sk = ca_address_sk + and substr(ca_zip,1,5) <> substr(s_zip,1,5) + and ss_store_sk = s_store_sk + group by i_brand + ,i_brand_id + ,i_manufact_id + ,i_manufact + order by ext_price desc + ,i_brand + ,i_brand_id + ,i_manufact_id + ,i_manufact + limit 100""", + 'parameter': + { + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + }, + 'MONTH': { + 'type': "integer", + 'range': [11,12] + }, + 'MANAGER': { + 'type': "integer", + 'range': [1,100] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q20", + 'query': """ select i_item_id + ,i_item_desc + ,i_category + ,i_class + ,i_current_price + ,sum(cs_ext_sales_price) as itemrevenue + ,sum(cs_ext_sales_price)*100/sum(sum(cs_ext_sales_price)) over + (partition by i_class) as revenueratio + from catalog_sales + ,item + ,date_dim + where cs_item_sk = i_item_sk + and i_category in ('{CATEGORY1}', '{CATEGORY2}','{CATEGORY3}') + and cs_sold_date_sk = d_date_sk + and d_date between cast('{YEAR}-01-01' as date) + and (cast('{YEAR}-01-01' as date) + interval '30' day) + group by i_item_id + ,i_item_desc + ,i_category + ,i_class + ,i_current_price + order by i_category + ,i_class + ,i_item_id + ,i_item_desc + ,revenueratio + limit 100""", + 'parameter': + { + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + }, + 'CATEGORY': { + 'type': "list", + 'size': 3, + 'range': ["Books","Children","Electronics","Home","Jewelry","Men","Music","Shoes","Sports","Women"] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q21", + 'query': """ select * + from(select w_warehouse_name + ,i_item_id + ,sum(case when (cast(d_date as date) < cast('{YEAR}-{MONTH}-{DAY}' as date)) + then inv_quantity_on_hand + else 0. end) as inv_before + ,sum(case when (cast(d_date as date) >= cast('{YEAR}-{MONTH}-{DAY}' as date)) + then inv_quantity_on_hand + else 0. 
end) as inv_after + from inventory + ,warehouse + ,item + ,date_dim + where i_current_price between 0.99 and 1.49 + and i_item_sk = inv_item_sk + and inv_warehouse_sk = w_warehouse_sk + and inv_date_sk = d_date_sk + and d_date between (cast('{YEAR}-{MONTH}-{DAY}' as date) - interval '30' day) + and (cast('{YEAR}-{MONTH}-{DAY}' as date) + interval '30' day) + group by w_warehouse_name, i_item_id) x + where (case when inv_before > 0 + then CAST(inv_after AS FLOAT) / inv_before + else null + end) between 2.0/3.0 and 3.0/2.0 + order by w_warehouse_name is not null, w_warehouse_name + ,i_item_id is not null, i_item_id, inv_before, inv_after + limit 100""", + 'parameter': + { + 'MONTH': { + 'type': "integer", + 'range': [1,12] + }, + 'DAY': { + 'type': "integer", + 'range': [1,28] + }, + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q22", + 'query': """select i_product_name + ,i_brand + ,i_class + ,i_category + ,avg(inv_quantity_on_hand) qoh + from inventory + ,date_dim + ,item + where inv_date_sk=d_date_sk + and inv_item_sk=i_item_sk + and d_month_seq between {DMS} and {DMS} + 11 + group by i_product_name + ,i_brand + ,i_class + ,i_category with rollup + order by qoh is not null, qoh, i_product_name is not null, i_product_name, i_brand is not null, i_brand, i_class is not null, i_class, i_category is not null, i_category + limit 100""", + 'DBMS': { + 'MariaDB': """with total as (select i_product_name + ,i_brand + ,i_class + ,i_category + ,avg(inv_quantity_on_hand) qoh + from inventory + ,date_dim + ,item + where inv_date_sk=d_date_sk + and inv_item_sk=i_item_sk + and d_month_seq between {DMS} and {DMS} + 11 + group by i_product_name + ,i_brand + ,i_class + ,i_category with rollup) + select * from total + order by qoh is not null, qoh, i_product_name is not null, i_product_name, i_brand is not null, i_brand, i_class is not null, i_class, i_category is not null, i_category + limit 100""", + 'MonetDB': """select i_product_name + ,i_brand + ,i_class + ,i_category + ,avg(inv_quantity_on_hand) qoh + from inventory + ,date_dim + ,item + where inv_date_sk=d_date_sk + and inv_item_sk=i_item_sk + and d_month_seq between {DMS} and {DMS} + 11 + group by rollup(i_product_name + ,i_brand + ,i_class + ,i_category) + order by avg(inv_quantity_on_hand) is not null, avg(inv_quantity_on_hand), i_product_name is not null, i_product_name, i_brand is not null, i_brand, i_class is not null, i_class, i_category is not null, i_category + limit 100""", + 'PostgreSQL': """select i_product_name + ,i_brand + ,i_class + ,i_category + ,avg(inv_quantity_on_hand) qoh + from inventory + ,date_dim + ,item + where inv_date_sk=d_date_sk + and inv_item_sk=i_item_sk + and d_month_seq between {DMS} and {DMS} + 11 + group by rollup(i_product_name + ,i_brand + ,i_class + ,i_category) + order by avg(inv_quantity_on_hand) is not null, avg(inv_quantity_on_hand), i_product_name is not null, i_product_name, i_brand is not null, i_brand, i_class is not null, i_class, i_category is not null, i_category + limit 100""" + }, + 'parameter': + { + 'DMS': { + 'type': "integer", + 'range': [1176,1224] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 
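# This timer block presumably configures dbmsbenchmarker's result-set checks:
+ # received result sets are sorted before comparison ('sorted'), compared across
+ # connections by content ('compare': 'result'), optionally persisted ('store')
+ # and rounded to 'precision' decimal places beforehand. +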
'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q23a+b", + 'query': ["""with frequent_ss_items as + (select substr(i_item_desc,1,30) itemdesc,i_item_sk item_sk,d_date solddate,count(*) cnt + from store_sales + ,date_dim + ,item + where ss_sold_date_sk = d_date_sk + and ss_item_sk = i_item_sk + and d_year in ({YEAR},{YEAR}+1,{YEAR}+2,{YEAR}+3) + group by substr(i_item_desc,1,30),i_item_sk,d_date + having count(*) >4), + max_store_sales as + (select max(csales) tpcds_cmax + from (select c_customer_sk,sum(ss_quantity*ss_sales_price) csales + from store_sales + ,customer + ,date_dim + where ss_customer_sk = c_customer_sk + and ss_sold_date_sk = d_date_sk + and d_year in ({YEAR},{YEAR}+1,{YEAR}+2,{YEAR}+3) + group by c_customer_sk) x), + best_ss_customer as + (select c_customer_sk,sum(ss_quantity*ss_sales_price) ssales + from store_sales + ,customer + where ss_customer_sk = c_customer_sk + group by c_customer_sk + having sum(ss_quantity*ss_sales_price) > ({TOPPERCENT}/100.0) * (select + * + from + max_store_sales)) + select sum(sales) + from (select cs_quantity*cs_list_price sales + from catalog_sales + ,date_dim + where d_year = {YEAR} + and d_moy = {MONTH} + and cs_sold_date_sk = d_date_sk + and cs_item_sk in (select item_sk from frequent_ss_items) + and cs_bill_customer_sk in (select c_customer_sk from best_ss_customer) + union all + select ws_quantity*ws_list_price sales + from web_sales + ,date_dim + where d_year = {YEAR} + and d_moy = {MONTH} + and ws_sold_date_sk = d_date_sk + and ws_item_sk in (select item_sk from frequent_ss_items) + and ws_bill_customer_sk in (select c_customer_sk from best_ss_customer)) y + limit 100""", """with frequent_ss_items as + (select substr(i_item_desc,1,30) itemdesc,i_item_sk item_sk,d_date solddate,count(*) cnt + from store_sales + ,date_dim + ,item + where ss_sold_date_sk = d_date_sk + and ss_item_sk = i_item_sk + and d_year in ({YEAR},{YEAR} + 1,{YEAR} + 2,{YEAR} + 3) + group by substr(i_item_desc,1,30),i_item_sk,d_date + having count(*) >4), + max_store_sales as + (select max(csales) tpcds_cmax + from (select c_customer_sk,sum(ss_quantity*ss_sales_price) csales + from store_sales + ,customer + ,date_dim + where ss_customer_sk = c_customer_sk + and ss_sold_date_sk = d_date_sk + and d_year in ({YEAR},{YEAR}+1,{YEAR}+2,{YEAR}+3) + group by c_customer_sk) x), + best_ss_customer as + (select c_customer_sk,sum(ss_quantity*ss_sales_price) ssales + from store_sales + ,customer + where ss_customer_sk = c_customer_sk + group by c_customer_sk + having sum(ss_quantity*ss_sales_price) > ({TOPPERCENT}/100.0) * (select + * + from max_store_sales)) + select c_last_name,c_first_name,sales + from (select c_last_name,c_first_name,sum(cs_quantity*cs_list_price) sales + from catalog_sales + ,customer + ,date_dim + where d_year = {YEAR} + and d_moy = {MONTH} + and cs_sold_date_sk = d_date_sk + and cs_item_sk in (select item_sk from frequent_ss_items) + and cs_bill_customer_sk in (select c_customer_sk from best_ss_customer) + and cs_bill_customer_sk = c_customer_sk + group by c_last_name,c_first_name + union all + select c_last_name,c_first_name,sum(ws_quantity*ws_list_price) sales + from web_sales + ,customer + ,date_dim + where d_year = {YEAR} + and d_moy = {MONTH} + and ws_sold_date_sk = d_date_sk + and ws_item_sk in (select item_sk from frequent_ss_items) + and ws_bill_customer_sk in (select c_customer_sk from best_ss_customer) + and ws_bill_customer_sk = c_customer_sk + group by 
c_last_name,c_first_name) y + order by c_last_name,c_first_name,sales + limit 100"""], + 'parameter': + { + 'MONTH': { + 'type': "integer", + 'range': [1,7] + }, + 'TOPPERCENT': { + 'type': "integer", + 'range': [95,95] + }, + 'YEAR': { + 'type': "integer", + 'range': [1998,2000] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q24a+b", + 'query': ["""with ssales as + (select c_last_name + ,c_first_name + ,s_store_name + ,ca_state + ,s_state + ,i_color + ,i_current_price + ,i_manager_id + ,i_units + ,i_size + ,sum({AMOUNTONE}) netpaid + from store_sales + ,store_returns + ,store + ,item + ,customer + ,customer_address + where ss_ticket_number = sr_ticket_number + and ss_item_sk = sr_item_sk + and ss_customer_sk = c_customer_sk + and ss_item_sk = i_item_sk + and ss_store_sk = s_store_sk + and c_current_addr_sk = ca_address_sk + and c_birth_country <> upper(ca_country) + and s_zip = ca_zip + and s_market_id={MARKET} + group by c_last_name + ,c_first_name + ,s_store_name + ,ca_state + ,s_state + ,i_color + ,i_current_price + ,i_manager_id + ,i_units + ,i_size) + select c_last_name + ,c_first_name + ,s_store_name + ,sum(netpaid) paid + from ssales + where i_color = '{COLOR}' + group by c_last_name + ,c_first_name + ,s_store_name + having sum(netpaid) > (select 0.05*avg(netpaid) + from ssales) + order by c_last_name + ,c_first_name + ,s_store_name + """, """ + with ssales as + (select c_last_name + ,c_first_name + ,s_store_name + ,ca_state + ,s_state + ,i_color + ,i_current_price + ,i_manager_id + ,i_units + ,i_size + ,sum({AMOUNTONE}) netpaid + from store_sales + ,store_returns + ,store + ,item + ,customer + ,customer_address + where ss_ticket_number = sr_ticket_number + and ss_item_sk = sr_item_sk + and ss_customer_sk = c_customer_sk + and ss_item_sk = i_item_sk + and ss_store_sk = s_store_sk + and c_current_addr_sk = ca_address_sk + and c_birth_country <> upper(ca_country) + and s_zip = ca_zip + and s_market_id = {MARKET} + group by c_last_name + ,c_first_name + ,s_store_name + ,ca_state + ,s_state + ,i_color + ,i_current_price + ,i_manager_id + ,i_units + ,i_size) + select c_last_name + ,c_first_name + ,s_store_name + ,sum(netpaid) paid + from ssales + where i_color = '{COLOR}' + group by c_last_name + ,c_first_name + ,s_store_name + having sum(netpaid) > (select 0.05*avg(netpaid) + from ssales) + order by c_last_name + ,c_first_name + ,s_store_name + """], + 'parameter': + { + 'MARKET': { + 'type': "integer", + 'range': [5,10] + }, + 'COLOR': { + 'type': "list", + 'range': ["bisque","black","blue","blush","chocolate","coral","cream","cyan","firebrick","frosted","gainsboro","ghost","goldenrod","green","grey","honeydew","hot","indian","ivory","khaki","lace","lavender","lawn","lime","linen","maroon","medium","midnight","mint","misty","moccasin","navy","olive","orange","orchid","pale","papaya","peach","peru","pink","plum","powder","puff","purple","red","rose","rosy","royal","saddle","salmon","sandy","seashell","sienna","sky","slate","smoke","snow","spring","steel","tan","thistle","tomato","turquoise","violet","wheat","white","yellow"] + }, + 'AMOUNTONE': { + 'type': "list", + 'range': ["ss_net_paid","ss_net_paid_inc_tax","ss_net_profit","ss_sales_price","ss_ext_sales_price"] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 
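# As for Q23a+b, 'query' is a list here, which presumably executes the two
+ # statements of the official a/b template pair in sequence. Note that both Q24
+ # entries substitute the same {COLOR} value, whereas the reference templates
+ # use two different colors for the a and b variants. +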
'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q25", + 'query': """ select + i_item_id + ,i_item_desc + ,s_store_id + ,s_store_name + ,{AGG}(ss_net_profit) as store_sales_profit + ,{AGG}(sr_net_loss) as store_returns_loss + ,{AGG}(cs_net_profit) as catalog_sales_profit + from + store_sales + ,store_returns + ,catalog_sales + ,date_dim d1 + ,date_dim d2 + ,date_dim d3 + ,store + ,item + where + d1.d_moy = 4 + and d1.d_year = {YEAR} + and d1.d_date_sk = ss_sold_date_sk + and i_item_sk = ss_item_sk + and s_store_sk = ss_store_sk + and ss_customer_sk = sr_customer_sk + and ss_item_sk = sr_item_sk + and ss_ticket_number = sr_ticket_number + and sr_returned_date_sk = d2.d_date_sk + and d2.d_moy between 4 and 10 + and d2.d_year = {YEAR} + and sr_customer_sk = cs_bill_customer_sk + and sr_item_sk = cs_item_sk + and cs_sold_date_sk = d3.d_date_sk + and d3.d_moy between 4 and 10 + and d3.d_year = {YEAR} + group by + i_item_id + ,i_item_desc + ,s_store_id + ,s_store_name + order by + i_item_id + ,i_item_desc + ,s_store_id + ,s_store_name + limit 100""", + 'parameter': + { + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + }, + 'AGG': { + 'type': "list", + 'range': ["sum","min","max","avg","stddev_samp"] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q26", + 'query': """ select i_item_id, + avg(cs_quantity) agg1, + avg(cs_list_price) agg2, + avg(cs_coupon_amt) agg3, + avg(cs_sales_price) agg4 + from catalog_sales, customer_demographics, date_dim, item, promotion + where cs_sold_date_sk = d_date_sk and + cs_item_sk = i_item_sk and + cs_bill_cdemo_sk = cd_demo_sk and + cs_promo_sk = p_promo_sk and + cd_gender = '{GEN}' and + cd_marital_status = '{MS}' and + cd_education_status = '{ES}' and + (p_channel_email = 'N' or p_channel_event = 'N') and + d_year = {YEAR} + group by i_item_id + order by i_item_id + limit 100""", + 'parameter': + { + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + }, + 'MS': { + 'type': "list", + 'range': ["M","S","D","W","U"] + }, + 'GEN': { + 'type': "list", + 'range': ["M","F"] + }, + 'ES': { + 'type': "list", + 'range': ["Primary","Secondary","College","2 yr Degree","4 yr Degree", "Advanced Degree","Unknown"] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q27", + 'query': """ select i_item_id, + s_state, grouping(s_state) g_state, + avg(ss_quantity) agg1, + avg(ss_list_price) agg2, + avg(ss_coupon_amt) agg3, + avg(ss_sales_price) agg4 + from store_sales, customer_demographics, date_dim, store, item + where ss_sold_date_sk = d_date_sk and + ss_item_sk = i_item_sk and + ss_store_sk = s_store_sk and + ss_cdemo_sk = cd_demo_sk and + cd_gender = '{GEN}' and + cd_marital_status = '{MS}' and + cd_education_status = '{ES}' and + d_year = {YEAR} and + s_state in ('{STATE1}','{STATE2}', '{STATE3}', '{STATE4}', '{STATE5}', '{STATE6}') + group by i_item_id, s_state with rollup + order by i_item_id + ,s_state + limit 100""", + 'DBMS': { + 
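# Dialect-specific overrides of the default query above: PostgreSQL and MonetDB
+ # use the standard 'group by rollup(...)', MariaDB only the MySQL-style
+ # 'group by ... with rollup'. MariaDB also lacks the grouping() function, so
+ # g_state is emulated via CASE WHEN s_state IS NULL, and since MariaDB does not
+ # allow ORDER BY together with WITH ROLLUP, the rollup is wrapped in a CTE
+ # first (the same workaround as for Q18 and Q22). +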
'MariaDB': """ SELECT i_item_id, + s_state, + CASE WHEN s_state IS NULL THEN 1 ELSE 0 END AS g_state, + AVG(ss_quantity) AS agg1, + AVG(ss_list_price) AS agg2, + AVG(ss_coupon_amt) AS agg3, + AVG(ss_sales_price) AS agg4 + FROM store_sales + JOIN customer_demographics ON ss_cdemo_sk = cd_demo_sk + JOIN date_dim ON ss_sold_date_sk = d_date_sk + JOIN store ON ss_store_sk = s_store_sk + JOIN item ON ss_item_sk = i_item_sk + WHERE cd_gender = '{GEN}' + AND cd_marital_status = '{MS}' + AND cd_education_status = '{ES}' + AND d_year = {YEAR} + AND s_state IN ('{STATE1}', '{STATE2}', '{STATE3}', '{STATE4}', '{STATE5}', '{STATE6}') + GROUP BY i_item_id, s_state with ROLLUP + LIMIT 100""", + 'MonetDB': """ select i_item_id, + s_state, grouping(s_state) g_state, + avg(ss_quantity) agg1, + avg(ss_list_price) agg2, + avg(ss_coupon_amt) agg3, + avg(ss_sales_price) agg4 + from store_sales, customer_demographics, date_dim, store, item + where ss_sold_date_sk = d_date_sk and + ss_item_sk = i_item_sk and + ss_store_sk = s_store_sk and + ss_cdemo_sk = cd_demo_sk and + cd_gender = '{GEN}' and + cd_marital_status = '{MS}' and + cd_education_status = '{ES}' and + d_year = '{YEAR}' and + s_state in ('{STATE1}','{STATE2}', '{STATE3}', '{STATE4}', '{STATE5}', '{STATE6}') + group by rollup(i_item_id, s_state) + order by i_item_id + ,s_state + limit 100""", + 'PostgreSQL': """ select i_item_id, + s_state, grouping(s_state) g_state, + avg(ss_quantity) agg1, + avg(ss_list_price) agg2, + avg(ss_coupon_amt) agg3, + avg(ss_sales_price) agg4 + from store_sales, customer_demographics, date_dim, store, item + where ss_sold_date_sk = d_date_sk and + ss_item_sk = i_item_sk and + ss_store_sk = s_store_sk and + ss_cdemo_sk = cd_demo_sk and + cd_gender = '{GEN}' and + cd_marital_status = '{MS}' and + cd_education_status = '{ES}' and + d_year = '{YEAR}' and + s_state in ('{STATE1}','{STATE2}', '{STATE3}', '{STATE4}', '{STATE5}', '{STATE6}') + group by rollup(i_item_id, s_state) + order by i_item_id + ,s_state + limit 100""", + }, + 'parameter': + { + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + }, + 'STATE': { + 'type': "list", + 'size': 6, + 'range': ["AK","AL","AR","AZ","CA","CO","CT","DE","FL","GA","HI","IA","ID","IL","IN","KS","KY","LA","ME","MI","MN","MO","MS","MT","NC","ND","NE","NJ","NM","NV","OH","OK","OR","PA","SC","SD","TN","TX","UT","VA","WA","WI","WV"] + }, + 'MS': { + 'type': "list", + 'range': ["M","S","D","W","U"] + }, + 'GEN': { + 'type': "list", + 'range': ["M","F"] + }, + 'ES': { + 'type': "list", + 'range': ["Primary","Secondary","College","2 yr Degree","4 yr Degree","Advanced Degree","Unknown"] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q28", + 'query': """ select * + from (select avg(ss_list_price) B1_LP + ,count(ss_list_price) B1_CNT + ,count(distinct ss_list_price) B1_CNTD + from store_sales + where ss_quantity between 0 and 5 + and (ss_list_price between {LISTPRICE1} and {LISTPRICE1}+10 + or ss_coupon_amt between {COUPONAMT1} and {COUPONAMT1}+1000 + or ss_wholesale_cost between {WHOLESALECOST1} and {WHOLESALECOST1}+20)) B1, + (select avg(ss_list_price) B2_LP + ,count(ss_list_price) B2_CNT + ,count(distinct ss_list_price) B2_CNTD + from store_sales + where ss_quantity between 6 and 10 + and (ss_list_price between {LISTPRICE2} and {LISTPRICE2}+10 + or 
ss_coupon_amt between {COUPONAMT2} and {COUPONAMT2}+1000 + or ss_wholesale_cost between {WHOLESALECOST2} and {WHOLESALECOST2}+20)) B2, + (select avg(ss_list_price) B3_LP + ,count(ss_list_price) B3_CNT + ,count(distinct ss_list_price) B3_CNTD + from store_sales + where ss_quantity between 11 and 15 + and (ss_list_price between {LISTPRICE3} and {LISTPRICE3}+10 + or ss_coupon_amt between {COUPONAMT3} and {COUPONAMT3}+1000 + or ss_wholesale_cost between {WHOLESALECOST3} and {WHOLESALECOST3}+20)) B3, + (select avg(ss_list_price) B4_LP + ,count(ss_list_price) B4_CNT + ,count(distinct ss_list_price) B4_CNTD + from store_sales + where ss_quantity between 16 and 20 + and (ss_list_price between {LISTPRICE4} and {LISTPRICE4}+10 + or ss_coupon_amt between {COUPONAMT4} and {COUPONAMT4}+1000 + or ss_wholesale_cost between {WHOLESALECOST4} and {WHOLESALECOST4}+20)) B4, + (select avg(ss_list_price) B5_LP + ,count(ss_list_price) B5_CNT + ,count(distinct ss_list_price) B5_CNTD + from store_sales + where ss_quantity between 21 and 25 + and (ss_list_price between {LISTPRICE5} and {LISTPRICE5}+10 + or ss_coupon_amt between {COUPONAMT5} and {COUPONAMT5}+1000 + or ss_wholesale_cost between {WHOLESALECOST5} and {WHOLESALECOST5}+20)) B5, + (select avg(ss_list_price) B6_LP + ,count(ss_list_price) B6_CNT + ,count(distinct ss_list_price) B6_CNTD + from store_sales + where ss_quantity between 26 and 30 + and (ss_list_price between {LISTPRICE6} and {LISTPRICE6}+10 + or ss_coupon_amt between {COUPONAMT6} and {COUPONAMT6}+1000 + or ss_wholesale_cost between {WHOLESALECOST6} and {WHOLESALECOST6}+20)) B6 + limit 100""", + 'parameter': + { + 'WHOLESALECOST': { + 'type': "integer", + 'size': 6, + 'range': [0,80] + }, + 'LISTPRICE': { + 'type': "integer", + 'size': 6, + 'range': [0,190] + }, + 'COUPONAMT': { + 'type': "integer", + 'size': 6, + 'range': [0,18000] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q29", + 'query': """ select + i_item_id + ,i_item_desc + ,s_store_id + ,s_store_name + ,{AGG}(ss_quantity) as store_sales_quantity + ,{AGG}(sr_return_quantity) as store_returns_quantity + ,{AGG}(cs_quantity) as catalog_sales_quantity + from + store_sales + ,store_returns + ,catalog_sales + ,date_dim d1 + ,date_dim d2 + ,date_dim d3 + ,store + ,item + where + d1.d_moy = {MONTH} + and d1.d_year = {YEAR} + and d1.d_date_sk = ss_sold_date_sk + and i_item_sk = ss_item_sk + and s_store_sk = ss_store_sk + and ss_customer_sk = sr_customer_sk + and ss_item_sk = sr_item_sk + and ss_ticket_number = sr_ticket_number + and sr_returned_date_sk = d2.d_date_sk + and d2.d_moy between {MONTH} and {MONTH} + 3 + and d2.d_year = {YEAR} + and sr_customer_sk = cs_bill_customer_sk + and sr_item_sk = cs_item_sk + and cs_sold_date_sk = d3.d_date_sk + and d3.d_year in ({YEAR},{YEAR}+1,{YEAR}+2) + group by + i_item_id + ,i_item_desc + ,s_store_id + ,s_store_name + order by + i_item_id + ,i_item_desc + ,s_store_id + ,s_store_name + limit 100""", + 'parameter': + { + 'YEAR': { + 'type': "integer", + 'range': [1998,2000] + }, + 'MONTH': { + 'type': "integer", + 'range': [4,4] + }, + 'AGG': { + 'type': "list", + 'range': ["sum","min","max","avg","stddev_samp"] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 
'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q30", + 'query': """ with customer_total_return as + (select wr_returning_customer_sk as ctr_customer_sk + ,ca_state as ctr_state, + sum(wr_return_amt) as ctr_total_return + from web_returns + ,date_dim + ,customer_address + where wr_returned_date_sk = d_date_sk + and d_year ={YEAR} + and wr_returning_addr_sk = ca_address_sk + group by wr_returning_customer_sk + ,ca_state) + select c_customer_id,c_salutation,c_first_name,c_last_name,c_preferred_cust_flag + ,c_birth_day,c_birth_month,c_birth_year,c_birth_country,c_login,c_email_address + ,c_last_review_date,ctr_total_return + from customer_total_return ctr1 + ,customer_address + ,customer + where ctr1.ctr_total_return > (select avg(ctr_total_return)*1.2 + from customer_total_return ctr2 + where ctr1.ctr_state = ctr2.ctr_state) + and ca_address_sk = c_current_addr_sk + and ca_state = '{STATE}' + and ctr1.ctr_customer_sk = c_customer_sk + order by c_customer_id,c_salutation,c_first_name,c_last_name,c_preferred_cust_flag + ,c_birth_day,c_birth_month,c_birth_year,c_birth_country,c_login,c_email_address + ,c_last_review_date,ctr_total_return + limit 100""", + 'parameter': + { + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + }, + 'STATE': { + 'type': "list", + 'range': ["TN"] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q31", + 'query': """ with ss as + (select ca_county,d_qoy, d_year,sum(ss_ext_sales_price) as store_sales + from store_sales,date_dim,customer_address + where ss_sold_date_sk = d_date_sk + and ss_addr_sk=ca_address_sk + group by ca_county,d_qoy, d_year), + ws as + (select ca_county,d_qoy, d_year,sum(ws_ext_sales_price) as web_sales + from web_sales,date_dim,customer_address + where ws_sold_date_sk = d_date_sk + and ws_bill_addr_sk=ca_address_sk + group by ca_county,d_qoy, d_year) + select + ss1.ca_county + ,ss1.d_year + ,ws2.web_sales/ws1.web_sales web_q1_q2_increase + ,ss2.store_sales/ss1.store_sales store_q1_q2_increase + ,ws3.web_sales/ws2.web_sales web_q2_q3_increase + ,ss3.store_sales/ss2.store_sales store_q2_q3_increase + from + ss ss1 + ,ss ss2 + ,ss ss3 + ,ws ws1 + ,ws ws2 + ,ws ws3 + where + ss1.d_qoy = 1 + and ss1.d_year = {YEAR} + and ss1.ca_county = ss2.ca_county + and ss2.d_qoy = 2 + and ss2.d_year = {YEAR} + and ss2.ca_county = ss3.ca_county + and ss3.d_qoy = 3 + and ss3.d_year = {YEAR} + and ss1.ca_county = ws1.ca_county + and ws1.d_qoy = 1 + and ws1.d_year = {YEAR} + and ws1.ca_county = ws2.ca_county + and ws2.d_qoy = 2 + and ws2.d_year = {YEAR} + and ws1.ca_county = ws3.ca_county + and ws3.d_qoy = 3 + and ws3.d_year ={YEAR} + and case when ws1.web_sales > 0 then ws2.web_sales/ws1.web_sales else null end + > case when ss1.store_sales > 0 then ss2.store_sales/ss1.store_sales else null end + and case when ws2.web_sales > 0 then ws3.web_sales/ws2.web_sales else null end + > case when ss2.store_sales > 0 then ss3.store_sales/ss2.store_sales else null end + order by {AGG}""", + 'parameter': + { + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + }, + 'AGG': { + 'type': "list", + 'range': ["ss1.ca_county","ss1.d_year","web_q1_q2_increase","store_q1_q2_increase","web_q2_q3_increase","store_q2_q3_increase"] + } + }, + 'active': True, + 
'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q32", + 'query': """ select sum(cs_ext_discount_amt) as "excess discount amount" + from + catalog_sales + ,item + ,date_dim + where + i_manufact_id = {IMID} + and i_item_sk = cs_item_sk + and d_date between '{YEAR}-{MONTH}-01' and + (cast('{YEAR}-{MONTH}-01' as date) + interval '90' day) + and d_date_sk = cs_sold_date_sk + and cs_ext_discount_amt + > ( + select + 1.3 * avg(cs_ext_discount_amt) + from + catalog_sales + ,date_dim + where + cs_item_sk = i_item_sk + and d_date between '{YEAR}-{MONTH}-01' and + (cast('{YEAR}-{MONTH}-01' as date) + interval '90' day) + and d_date_sk = cs_sold_date_sk + ) + limit 100; """, + 'parameter': + { + 'MONTH': { + 'type': "integer", + 'range': [1,4] + }, + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + }, + 'IMID': { + 'type': "integer", + 'range': [1,1000] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q33", + 'query': """with ss as ( + select + i_manufact_id,sum(ss_ext_sales_price) total_sales + from + store_sales, + date_dim, + customer_address, + item + where + i_manufact_id in (select + i_manufact_id + from + item + where i_category in ('{CATEGORY}')) + and ss_item_sk = i_item_sk + and ss_sold_date_sk = d_date_sk + and d_year = {YEAR} + and d_moy = {MONTH} + and ss_addr_sk = ca_address_sk + and ca_gmt_offset = {GMT} + group by i_manufact_id), + cs as ( + select + i_manufact_id,sum(cs_ext_sales_price) total_sales + from + catalog_sales, + date_dim, + customer_address, + item + where + i_manufact_id in (select + i_manufact_id + from + item + where i_category in ('{CATEGORY}')) + and cs_item_sk = i_item_sk + and cs_sold_date_sk = d_date_sk + and d_year = {YEAR} + and d_moy = {MONTH} + and cs_bill_addr_sk = ca_address_sk + and ca_gmt_offset = {GMT} + group by i_manufact_id), + ws as ( + select + i_manufact_id,sum(ws_ext_sales_price) total_sales + from + web_sales, + date_dim, + customer_address, + item + where + i_manufact_id in (select + i_manufact_id + from + item + where i_category in ('{CATEGORY}')) + and ws_item_sk = i_item_sk + and ws_sold_date_sk = d_date_sk + and d_year = {YEAR} + and d_moy = {MONTH} + and ws_bill_addr_sk = ca_address_sk + and ca_gmt_offset = {GMT} + group by i_manufact_id) + select i_manufact_id ,sum(total_sales) total_sales + from (select * from ss + union all + select * from cs + union all + select * from ws) tmp1 + group by i_manufact_id + order by total_sales + limit 100""", + 'parameter': + { + 'GMT': { + 'type': "list", + 'range': ["-10.00","-9.00","-8.00","-7.00","-6.00","-5.00"] + }, + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + }, + 'MONTH': { + 'type': "integer", + 'range': [1,7] + }, + 'CATEGORY': { + 'type': "list", + 'range': ["Books","Home","Electronics","Jewelry","Sports"] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q34", + 'query': """ select c_last_name + 
,c_first_name + ,c_salutation + ,c_preferred_cust_flag + ,ss_ticket_number + ,cnt from + (select ss_ticket_number + ,ss_customer_sk + ,count(*) cnt + from store_sales,date_dim,store,household_demographics + where store_sales.ss_sold_date_sk = date_dim.d_date_sk + and store_sales.ss_store_sk = store.s_store_sk + and store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk + and (date_dim.d_dom between 1 and 3 or date_dim.d_dom between 25 and 28) + and (household_demographics.hd_buy_potential = '{BPONE}' or + household_demographics.hd_buy_potential = '{BPTWO}') + and household_demographics.hd_vehicle_count > 0 + and (case when household_demographics.hd_vehicle_count > 0 + then household_demographics.hd_dep_count/ household_demographics.hd_vehicle_count + else null + end) > 1.2 + and date_dim.d_year in ({YEAR},{YEAR}+1,{YEAR}+2) + and store.s_county in ('{COUNTY1}','{COUNTY2}','{COUNTY3}','{COUNTY4}','{COUNTY5}','{COUNTY6}','{COUNTY7}','{COUNTY8}') + group by ss_ticket_number,ss_customer_sk) dn,customer + where ss_customer_sk = c_customer_sk + and cnt between 15 and 20 + order by c_last_name,c_first_name,c_salutation,c_preferred_cust_flag desc, ss_ticket_number""", + 'parameter': + { + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + }, + 'BPONE': { + 'type': "list", + 'range': ["1001-5000",">10000","501-1000"] + }, + 'BPTWO': { + 'type': "list", + 'range': ["0-500","Unknown","5001-10000"] + }, + 'COUNTY': { + 'type': "list", + 'size': 8, + 'range': ["Ziebach County","Zavala County","Zapata County","Yuma County","Yukon-Koyukuk Census Area","Yuba County","Young County","York County","Yolo County","Yoakum County","Yellowstone County","Yellow Medicine County","Yell County","Yazoo County","Yavapai County","Yates County","Yankton County","Yancey County","Yamhill County","Yalobusha County","Yakutat Borough","Yakima County","Yadkin County","Wythe County","Wyoming County","Wyandotte County","Wyandot County","Wright County","Worth County","Worcester County","Woodward County","Woodson County","Woods County","Woodruff County","Woodford County","Woodbury County","Wood County","Wolfe County","Wise County","Wirt County","Winston County","Winona County","Winneshiek County","Winnebago County","Winn Parish","Winkler County","Windsor County","Windham County","Winchester city","Wilson County","Williamson County","Williamsburg County","Williamsburg city","Williams County","Willacy County","Will County","Wilkinson County","Wilkin County","Wilkes County","Wilcox County","Wilbarger County","Wicomico County","Wichita County","Wibaux County","Whitman County","Whitley County","Whitfield County","Whiteside County","White Pine County","White County","Wheeler County","Wheatland County","Whatcom County","Wharton County","Wexford County","Wetzel County","Weston County","Westmoreland County","Westchester County","West Feliciana Parish","West Carroll Parish","West Baton Rouge Parish","Wells County","Weld County","Webster Parish","Webster County","Weber County","Webb County","Weakley County","Waynesboro city","Wayne County","Waushara County","Waupaca County","Waukesha County","Watonwan County","Watauga County","Washtenaw County","Washoe County","Washita County","Washington Parish","Washington County","Washburn County","Washakie County","Waseca County","Wasco County","Wasatch County","Warrick County","Warren County","Ware County","Ward County","Wapello County","Walworth County","Walton County","Walthall County","Walsh County","Wallowa County","Waller County","Wallace County","Walla Walla County","Walker 
County","Waldo County","Wakulla County","Wake County","Wahkiakum County","Wagoner County","Wadena County","Wade Hampton Census Area","Wabaunsee County","Wabasha County","Wabash County","Volusia County","Virginia Beach city","Vinton County","Vilas County","Vigo County","Victoria County","Vernon Parish","Vernon County","Vermillion County","Vermilion Parish","Vermilion County","Ventura County","Venango County","Vanderburgh County","Vance County","Van Zandt County","Van Wert County","Van Buren County","Valley County","Valencia County","Valdez-Cordova Census Area","Val Verde County","Uvalde County","Utah County","Upton County","Upson County","Upshur County","Union Parish","Union County","Unicoi County","Umatilla County","Ulster County","Uintah County","Uinta County","Tyrrell County","Tyler County","Twin Falls County","Twiggs County","Tuscola County","Tuscarawas County","Tuscaloosa County","Turner County","Tuolumne County","Tunica County","Tulsa County","Tulare County","Tucker County","Trumbull County","Trousdale County","Troup County","Tripp County","Trinity County","Trimble County","Trigg County","Treutlen County","Trempealeau County","Trego County","Treasure County","Travis County","Traverse County","Transylvania County","Traill County","Towns County","Towner County","Torrance County","Toombs County","Toole County","Tooele County","Tompkins County","Tom Green County","Tolland County","Todd County","Titus County","Tishomingo County","Tipton County","Tippecanoe County","Tippah County","Tioga County","Tillman County","Tillamook County","Tift County","Thurston County","Throckmorton County","Thomas County","Thayer County","Texas County","Teton County","Terry County","Terrell County","Terrebonne Parish","Tensas Parish","Teller County","Telfair County","Tehama County","Tazewell County","Taylor County","Tattnall County","Tate County","Tarrant County","Taos County","Tangipahoa Parish","Taney County","Tama County","Tallapoosa County","Tallahatchie County","Talladega County","Taliaferro County","Talbot County","Switzerland County","Swisher County","Swift County","Sweetwater County","Sweet Grass County","Swain County","Suwannee County","Sutton County","Sutter County","Sussex County","Susquehanna County","Surry County","Sunflower County","Sumter County","Sumner County","Summit County","Summers County","Sully County","Sullivan County","Suffolk County","Suffolk city","Sublette County","Stutsman County","Strafford County","Story County","Storey County","Stonewall County","Stone County","Stokes County","Stoddard County","Stillwater County","Stewart County","Stevens County","Steuben County","Sterling County","Stephenson County","Stephens County","Steele County","Stearns County","Staunton city","Starr County","Starke County","Stark County","Stanton County","Stanly County","Stanley County","Stanislaus County","Stafford County","Spotsylvania County","Spokane County","Spink County","Spencer County","Spartanburg County","Spalding County","Southampton County","Sonoma County","Somervell County","Somerset County","Solano County","Socorro County","Snyder County","Snohomish County","Smyth County","Smith County","Slope County","Skamania County","Skagit County","Sitka Borough","Siskiyou County","Sioux County","Simpson County","Silver Bow County","Sierra County","Sibley County","Shoshone County","Shiawassee County","Sherman County","Sheridan County","Sherburne County","Shenandoah County","Shelby County","Sheboygan County","Shawnee County","Shawano County","Shasta County","Sharp County","Sharkey County","Shannon 
County","Shackelford County","Seward County","Sevier County","Sequoyah County","Sequatchie County","Seneca County","Seminole County","Sedgwick County","Sebastian County","Searcy County","Scurry County","Screven County","Scotts Bluff County","Scott County","Scotland County","Scioto County","Schuylkill County","Schuyler County","Schoolcraft County","Schoharie County","Schley County","Schleicher County","Schenectady County","Sawyer County","Saunders County","Sauk County","Sarpy County","Sargent County","Saratoga County","Sarasota County","Santa Rosa County","Santa Fe County","Santa Cruz County","Santa Clara County","Santa Barbara County","Sanpete County","Sanilac County","Sangamon County","Sandusky County","Sandoval County","Sanders County","Sanborn County","San Saba County","San Patricio County","San Miguel County","San Mateo County","San Luis Obispo County","San Juan County","San Joaquin County","San Jacinto County","San Francisco County","San Diego County","San Bernardino County","San Benito County","San Augustine County","Sampson County","Saluda County","Salt Lake County","Saline County","Salem County","Salem city","Saguache County","Saginaw County","Sagadahoc County","Sacramento County","Sac County","Sabine Parish","Sabine County","Rutland County","Rutherford County","Russell County","Rusk County","Rush County","Runnels County","Rowan County","Routt County","Ross County","Rosebud County","Roseau County","Roscommon County","Roosevelt County","Rooks County","Rolette County","Rogers County","Roger Mills County","Rockwall County","Rockland County","Rockingham County","Rockdale County","Rockcastle County","Rockbridge County","Rock Island County","Rock County","Robeson County","Robertson County","Roberts County","Roanoke County","Roanoke city","Roane County","Riverside County","Ritchie County","Ripley County","Rio Grande County","Rio Blanco County","Rio Arriba County","Ringgold County","Riley County","Richmond County","Richmond city","Richland Parish","Richland County","Richardson County","Rich County","Rice County","Rhea County","Reynolds County","Republic County","Renville County","Rensselaer County","Reno County","Refugio County","Reeves County","Redwood County","Red Willow County","Red River Parish","Red River County","Red Lake County","Real County","Reagan County","Ray County","Rawlins County","Ravalli County","Rappahannock County","Rapides Parish","Ransom County","Rankin County","Randolph County","Randall County","Ramsey County","Ralls County","Raleigh County","Rains County","Radford city","Racine County","Rabun County","Quitman County","Queens County","Queen Anne County","Quay County","Putnam County","Pushmataha County","Pulaski County","Pueblo County","Prowers County","Providence County","Prince William County","Prince George County","Prince Edward County","Price County","Preston County","Presque Isle County","Presidio County","Prentiss County","Preble County","Pratt County","Prairie County","Powhatan County","Poweshiek County","Power County","Powell County","Powder River County","Potter County","Pottawattamie County","Pottawatomie County","Posey County","Portsmouth city","Porter County","Portage County","Poquoson city","Pope County","Pontotoc County","Pondera County","Polk County","Pointe Coupee Parish","Poinsett County","Pocahontas County","Plymouth County","Plumas County","Pleasants County","Platte County","Plaquemines Parish","Placer County","Piute County","Pittsylvania County","Pittsburg County","Pitt County","Pitkin County","Piscataquis County","Pipestone County","Pinellas 
County","Pine County","Pinal County","Pima County","Pike County","Pierce County","Pickett County","Pickens County","Pickaway County","Piatt County","Phillips County","Philadelphia County","Phelps County","Pettis County","Petroleum County","Petersburg city","Person County","Pershing County","Perry County","Perquimans County","Perkins County","Pepin County","Peoria County","Penobscot County","Pennington County","Pendleton County","Pender County","Pend Oreille County","Pemiscot County","Pembina County","Pecos County","Pearl River County","Peach County","Payne County","Payette County","Pawnee County","Paulding County","Patrick County","Passaic County","Pasquotank County","Pasco County","Parmer County","Parker County","Parke County","Park County","Panola County","Pamlico County","Palo Pinto County","Palo Alto County","Palm Beach County","Page County","Pacific County","Ozaukee County","Ozark County","Oxford County","Owyhee County","Owsley County","Owen County","Overton County","Outagamie County","Ouray County","Ouachita Parish","Ouachita County","Otter Tail County","Ottawa County","Otsego County","Otoe County","Otero County","Oswego County","Oscoda County","Osceola County","Osborne County","Osage County","Orleans Parish","Orleans County","Oregon County","Orangeburg County","Orange County","Ontonagon County","Ontario County","Onslow County","Onondaga County","Oneida County","Olmsted County","Oliver County","Oldham County","Oktibbeha County","Okmulgee County","Oklahoma County","Okfuskee County","Okeechobee County","Okanogan County","Okaloosa County","Ohio County","Oglethorpe County","Ogle County","Ogemaw County","Oconto County","Oconee County","Ochiltree County","Oceana County","Ocean County","Obion County","Oakland County","O-Brien County","Nye County","Nueces County","Nuckolls County","Noxubee County","Nowata County","Nottoway County","Norton County","Norton city","Northwest Arctic Borough","Northumberland County","Northampton County","North Slope Borough","Norman County","Norfolk County","Norfolk city","Nome Census Area","Nolan County","Nodaway County","Nobles County","Noble County","Niobrara County","Nicollet County","Nicholas County","Niagara County","Nez Perce County","Newton County","Newport News city","Newport County","Newberry County","Newaygo County","New York County","New Madrid County","New London County","New Kent County","New Haven County","New Hanover County","New Castle County","Nevada County","Ness County","Neshoba County","Neosho County","Nemaha County","Nelson County","Navarro County","Navajo County","Natrona County","Natchitoches Parish","Nassau County","Nash County","Napa County","Nantucket County","Nance County","Nacogdoches County","Musselshell County","Muskogee County","Muskingum County","Muskegon County","Muscogee County","Muscatine County","Murray County","Multnomah County","Muhlenberg County","Mower County","Mountrail County","Moultrie County","Motley County","Morton County","Morrow County","Morrison County","Morris County","Morrill County","Morgan County","Morehouse Parish","Mora County","Moore County","Moody County","Montrose County","Montour County","Montmorency County","Montgomery County","Montezuma County","Monterey County","Montcalm County","Montague County","Monroe County","Monongalia County","Monona County","Mono County","Monmouth County","Moniteau County","Mohave County","Moffat County","Modoc County","Mobile County","Mitchell County","Missoula County","Mississippi County","Missaukee County","Minnehaha County","Minidoka County","Mingo County","Mineral 
County","Miner County","Milwaukee County","Mills County","Miller County","Mille Lacs County","Millard County","Milam County","Mifflin County","Midland County","Middlesex County","Miami County","Metcalfe County","Mesa County","Merrimack County","Merrick County","Meriwether County","Mercer County","Merced County","Menominee County","Menifee County","Mendocino County","Menard County","Mellette County","Meigs County","Meeker County","Medina County","Mecosta County","Mecklenburg County","Meagher County","Meade County","McPherson County","McNairy County","McMullen County","McMinn County","McLeod County","McLennan County","McLean County","McKinley County","McKenzie County","McKean County","McIntosh County","McHenry County","McDuffie County","McDowell County","McDonough County","McDonald County","McCurtain County","McCulloch County","McCreary County","McCracken County","McCormick County","McCook County","McCone County","McClain County","Mayes County","Maverick County","Maury County","Maui County","Mathews County","Matanuska-Susitna Borough","Matagorda County","Massac County","Mason County","Martinsville city","Martin County","Marshall County","Marquette County","Marlboro County","Mariposa County","Marion County","Marinette County","Marin County","Maries County","Maricopa County","Marengo County","Marathon County","Manitowoc County","Manistee County","Manatee County","Manassas Park city","Manassas city","Malheur County","Major County","Mahoning County","Mahnomen County","Mahaska County","Magoffin County","Madison Parish","Madison County","Madera County","Macoupin County","Macon County","Macomb County","Mackinac County","Lyon County","Lynn County","Lynchburg city","Lyman County","Lycoming County","Luzerne County","Lunenburg County","Luna County","Lumpkin County","Luce County","Lucas County","Lubbock County","Lowndes County","Loving County","Love County","Loup County","Louisa County","Loudoun County","Loudon County","Los Angeles County","Los Alamos County","Lorain County","Lonoke County","Long County","Logan County","Llano County","Livingston Parish","Livingston County","Live Oak County","Little River County","Litchfield County","Lipscomb County","Linn County","Lincoln Parish","Lincoln County","Limestone County","Licking County","Liberty County","Lexington County","Lexington city","Lewis County","Lewis and Clark County","Levy County","Letcher County","Leslie County","Leon County","Lenoir County","Lenawee County","Lemhi County","Lehigh County","Leflore County","Leelanau County","Lee County","Lebanon County","Leavenworth County","Leake County","Lea County","Le Sueur County","Le Flore County","Lawrence County","Lavaca County","Laurens County","Laurel County","Lauderdale County","Latimer County","Latah County","Lassen County","Las Animas County","Larue County","Larimer County","Laramie County","Lapeer County","Lanier County","Langlade County","Lane County","Lander County","Lancaster County","Lampasas County","LaMoure County","Lamoille County","Lamb County","Lamar County","Lake of the Woods County","Lake County","Lake and Peninsula Borough","Lagrange County","Lafourche Parish","Lafayette Parish","Lafayette County","Laclede County","Lackawanna County","Lac qui Parle County","Labette County","La Salle Parish","La Salle County","La Porte County","La Plata County","La Paz County","La Crosse County","Kossuth County","Kosciusko County","Kootenai County","Koochiching County","Kodiak Island Borough","Knox County","Knott County","Klickitat County","Kleberg County","Klamath County","Kittson County","Kittitas 
County","Kitsap County","Kit Carson County","Kiowa County","Kinney County","Kingsbury County","Kings County","Kingman County","Kingfisher County","King William County","King George County","King County","King and Queen County","Kimble County","Kimball County","Kidder County","Keya Paha County","Keweenaw County","Kewaunee County","Ketchikan Gateway Borough","Kershaw County","Kerr County","Kern County","Keokuk County","Kenton County","Kent County","Kenosha County","Kennebec County","Kenedy County","Kendall County","Kenai Peninsula Borough","Kemper County","Keith County","Kearny County","Kearney County","Kay County","Kaufman County","Kauai County","Karnes County","Kankakee County","Kane County","Kandiyohi County","Kanawha County","Kanabec County","Kalkaska County","Kalamazoo County","Juniata County","Juneau County","Juneau Borough","Judith Basin County","Juab County","Josephine County","Jones County","Johnston County","Johnson County","Jo Daviess County","Jim Wells County","Jim Hogg County","Jewell County","Jessamine County","Jersey County","Jerome County","Jerauld County","Jennings County","Jenkins County","Jefferson Parish","Jefferson Davis Parish","Jefferson Davis County","Jefferson County","Jeff Davis County","Jay County","Jasper County","James City County","Jackson Parish","Jackson County","Jack County","Izard County","Itawamba County","Itasca County","Issaquena County","Isle of Wight County","Island County","Isanti County","Isabella County","Irwin County","Iroquois County","Iron County","Irion County","Iredell County","Iowa County","Iosco County","Ionia County","Inyo County","Ingham County","Indiana County","Indian River County","Independence County","Imperial County","Idaho County","Ida County","Iberville Parish","Iberia Parish","Hyde County","Hutchinson County","Huron County","Huntington County","Huntingdon County","Hunterdon County","Hunt County","Humphreys County","Humboldt County","Hughes County","Huerfano County","Hudspeth County","Hudson County","Hubbard County","Howell County","Howard County","Houston County","Houghton County","Hot Springs County","Hot Spring County","Horry County","Hopkins County","Hopewell city","Hooker County","Hood River County","Hood County","Honolulu County","Holt County","Holmes County","Hoke County","Hodgeman County","Hockley County","Hocking County","Hitchcock County","Hinsdale County","Hinds County","Hillsdale County","Hillsborough County","Hill County","Highlands County","Highland County","Hidalgo County","Hickory County","Hickman County","Hettinger County","Hertford County","Hernando County","Herkimer County","Henry County","Henrico County","Hennepin County","Hendry County","Hendricks County","Henderson County","Hempstead County","Hemphill County","Heard County","Haywood County","Hays County","Hayes County","Hawkins County","Hawaii County","Haskell County","Harvey County","Hartley County","Hartford County","Hart County","Harrisonburg city","Harrison County","Harris County","Harper County","Harney County","Harnett County","Harmon County","Harlan County","Harford County","Hardy County","Harding County","Hardin County","Hardeman County","Hardee County","Haralson County","Hanson County","Hansford County","Hanover County","Hand County","Hancock County","Hampton County","Hampton city","Hampshire County","Hampden County","Hamlin County","Hamilton County","Hamblen County","Hall County","Halifax County","Hale County","Haines Borough","Habersham County","Haakon County","Gwinnett County","Guthrie County","Gunnison County","Gulf County","Guilford 
County","Guernsey County","Guadalupe County","Grundy County","Grimes County","Griggs County","Grenada County","Gregory County","Gregg County","Greer County","Greenwood County","Greenville County","Greenup County","Greensville County","Greenlee County","Greene County","Greenbrier County","Green Lake County","Green County","Greeley County","Grayson County","Grays Harbor County","Gray County","Graves County","Gratiot County","Granville County","Grant Parish","Grant County","Granite County","Grand Traverse County","Grand Isle County","Grand Forks County","Grand County","Grainger County","Graham County","Grafton County","Grady County","Gove County","Gosper County","Goshen County","Gordon County","Gooding County","Goodhue County","Goochland County","Gonzales County","Goliad County","Golden Valley County","Gogebic County","Glynn County","Gloucester County","Glenn County","Glasscock County","Glascock County","Gladwin County","Glades County","Glacier County","Gilpin County","Gilmer County","Gilliam County","Gillespie County","Giles County","Gilchrist County","Gila County","Gibson County","Georgetown County","George County","Gentry County","Geneva County","Genesee County","Gem County","Geauga County","Geary County","Gates County","Gaston County","Gasconade County","Garza County","Garvin County","Garrett County","Garrard County","Garland County","Garfield County","Garden County","Galveston County","Gallia County","Gallatin County","Galax city","Gaines County","Gage County","Gadsden County","Furnas County","Fulton County","Frontier County","Frio County","Fresno County","Fremont County","Freestone County","Freeborn County","Fredericksburg city","Frederick County","Franklin Parish","Franklin County","Franklin city","Fountain County","Foster County","Fort Bend County","Forsyth County","Forrest County","Forest County","Ford County","Fond du Lac County","Foard County","Fluvanna County","Floyd County","Florence County","Fleming County","Flathead County","Flagler County","Fisher County","Finney County","Fillmore County","Ferry County","Fergus County","Fentress County","Fayette County","Fauquier County","Faulkner County","Faulk County","Faribault County","Fannin County","Falls County","Falls Church city","Fallon County","Fall River County","Fairfield County","Fairfax County","Fairfax city","Fairbanks North Star Borough","Evans County","Evangeline Parish","Eureka County","Etowah County","Estill County","Essex County","Esmeralda County","Escambia County","Erie County","Erath County","Emporia city","Emmons County","Emmet County","Emery County","Emanuel County","Elmore County","Ellsworth County","Ellis County","Elliott County","Elko County","Elkhart County","Elk County","Elbert County","El Paso County","El Dorado County","Effingham County","Edwards County","Edmunds County","Edmonson County","Edgefield County","Edgecombe County","Edgar County","Eddy County","Ector County","Echols County","Eau Claire County","Eaton County","Eastland County","East Feliciana Parish","East Carroll Parish","East Baton Rouge Parish","Early County","Eagle County","Dyer County","Duval County","Dutchess County","Durham County","Duplin County","DuPage County","Dunn County","Dunklin County","Dundy County","Dukes County","Duchesne County","Dubuque County","Dubois County","Drew County","Douglas County","Dougherty County","Dorchester County","Door County","Dooly County","Donley County","Doniphan County","Dona Ana County","Dolores County","Dodge County","Doddridge County","Dixon County","Dixie County","Divide County","District of 
Columbia","Dinwiddie County","Dimmit County","Dillon County","Dillingham Census Area","Dickson County","Dickinson County","Dickey County","Dickenson County","Dickens County","DeWitt County","Dewey County","Deuel County","DeSoto County","Desha County","Deschutes County","Des Moines County","Denver County","Denton County","Dent County","Denali Borough","Delta County","Delaware County","Del Norte County","DeKalb County","Defiance County","Deer Lodge County","Decatur County","DeBaca County","Dearborn County","Deaf Smith County","De Witt County","De Soto Parish","De Kalb County","Day County","Dawson County","Dawes County","Davison County","Davis County","Daviess County","Davie County","Davidson County","Dauphin County","Darlington County","Darke County","Dare County","Danville city","Daniels County","Dane County","Dallas County","Dallam County","Dale County","Dakota County","Daggett County","Dade County","Cuyahoga County","Custer County","Curry County","Currituck County","Cuming County","Cumberland County","Culpeper County","Cullman County","Culberson County","Crowley County","Crow Wing County","Cross County","Crosby County","Crook County","Crockett County","Crittenden County","Crisp County","Crenshaw County","Creek County","Crawford County","Craven County","Crane County","Craighead County","Craig County","Cowlitz County","Cowley County","Coweta County","Covington County","Covington city","Cottonwood County","Cotton County","Cottle County","Costilla County","Coshocton County","Coryell County","Cortland County","Corson County","Copiah County","Coosa County","Coos County","Cooper County","Cooke County","Cook County","Conway County","Converse County","Contra Costa County","Conejos County","Conecuh County","Concordia Parish","Concho County","Comanche County","Comal County","Colusa County","Columbus County","Columbiana County","Columbia County","Colquitt County","Colorado County","Colonial Heights city","Collingsworth County","Collin County","Collier County","Colleton County","Colfax County","Coles County","Coleman County","Cole County","Colbert County","Coke County","Coffey County","Coffee County","Codington County","Coconino County","Cocke County","Cochran County","Cochise County","Cobb County","Coal County","Coahoma County","Cloud County","Clinton County","Clinch County","Clifton Forge city","Cleveland County","Clermont County","Cleburne County","Clearwater County","Clearfield County","Clear Creek County","Clayton County","Clay County","Clatsop County","Clarke County","Clark County","Clarion County","Clarendon County","Clare County","Clallam County","Claiborne Parish","Claiborne County","Clackamas County","Citrus County","Cimarron County","Cibola County","Churchill County","Christian County","Chowan County","Chouteau County","Choctaw County","Chittenden County","Chisago County","Chippewa County","Chilton County","Childress County","Chicot County","Chickasaw County","Cheyenne County","Chesterfield County","Chester County","Cheshire County","Chesapeake city","Cherry County","Cherokee County","Chenango County","Chemung County","Chelan County","Cheboygan County","Cheatham County","Chaves County","Chautauqua County","Chattooga County","Chattahoochee County","Chatham County","Chase County","Charlton County","Charlottesville city","Charlotte County","Charlevoix County","Charleston County","Charles Mix County","Charles County","Charles City County","Chariton County","Champaign County","Chambers County","Chaffee County","Cerro Gordo County","Centre County","Cedar County","Cecil County","Cayuga 
County","Cavalier County","Cattaraugus County","Catron County","Catoosa County","Catawba County","Catahoula Parish","Caswell County","Castro County","Cassia County","Cass County","Casey County","Cascade County","Carver County","Carteret County","Carter County","Carson County","Carson City","Carroll County","Caroline County","Carlton County","Carlisle County","Caribou County","Carbon County","Cape May County","Cape Girardeau County","Canyon County","Cannon County","Candler County","Canadian County","Campbell County","Camp County","Cameron Parish","Cameron County","Camden County","Cambria County","Camas County","Calvert County","Calumet County","Calloway County","Callaway County","Callahan County","Calhoun County","Caledonia County","Caldwell Parish","Caldwell County","Calcasieu Parish","Calaveras County","Caddo Parish","Caddo County","Cache County","Cabell County","Cabarrus County","Butts County","Butte County","Butler County","Burt County","Burnett County","Burnet County","Burlington County","Burleson County","Burleigh County","Burke County","Bureau County","Buncombe County","Bullock County","Bulloch County","Bullitt County","Buffalo County","Buena Vista County","Buena Vista city","Bucks County","Buckingham County","Buchanan County","Bryan County","Brunswick County","Brule County","Brown County","Broward County","Broome County","Brooks County","Brookings County","Brooke County","Bronx County","Broadwater County","Bristol County","Bristol city","Bristol Bay Borough","Briscoe County","Brewster County","Brevard County","Bremer County","Breckinridge County","Breathitt County","Brazos County","Brazoria County","Braxton County","Brantley County","Branch County","Bradley County","Bradford County","Bracken County","Boyle County","Boyd County","Box Elder County","Box Butte County","Bowman County","Bowie County","Bourbon County","Boundary County","Boulder County","Bottineau County","Botetourt County","Bossier Parish","Bosque County","Borden County","Boone County","Bonneville County","Bonner County","Bond County","Bon Homme County","Bollinger County","Bolivar County","Boise County","Blue Earth County","Blount County","Bledsoe County","Bleckley County","Bland County","Blanco County","Blair County","Blaine County","Bladen County","Blackford County","Black Hawk County","Bingham County","Billings County","Big Stone County","Big Horn County","Bienville Parish","Bibb County","Bexar County","Bethel Census Area","Bertie County","Berrien County","Bernalillo County","Berkshire County","Berks County","Berkeley County","Bergen County","Benzie County","Benton County","Bent County","Benson County","Bennington County","Bennett County","Benewah County","Ben Hill County","Beltrami County","Belmont County","Bell County","Belknap County","Bee County","Bedford County","Bedford city","Beckham County","Becker County","Beaverhead County","Beaver County","Beauregard Parish","Beaufort County","Bear Lake County","Beadle County","Baylor County","Bayfield County","Bay County","Baxter County","Bath County","Bates County","Bastrop County","Bartow County","Barton County","Bartholomew County","Barry County","Barrow County","Barron County","Barren County","Barnwell County","Barnstable County","Barnes County","Barbour County","Barber County","Baraga County","Bannock County","Banner County","Banks County","Bandera County","Bamberg County","Baltimore County","Baltimore city","Ballard County","Baldwin County","Baker County","Bailey County","Bacon County","Baca County","Avoyelles Parish","Avery County","Autauga County","Austin 
County","Aurora County","Augusta County","Auglaize County","Audubon County","Audrain County","Attala County","Atoka County","Atlantic County","Atkinson County","Athens County","Atchison County","Atascosa County","Assumption Parish","Asotin County","Ashtabula County","Ashley County","Ashland County","Ashe County","Ascension Parish","Arthur County","Aroostook County","Armstrong County","Arlington County","Arkansas County","Arenac County","Archuleta County","Archer County","Arapahoe County","Aransas County","Appomattox County","Appling County","Appanoose County","Apache County","Antrim County","Antelope County","Anson County","Anoka County","Anne Arundel County","Angelina County","Androscoggin County","Andrews County","Andrew County","Anderson County","Anchorage Borough","Amite County","Amherst County","Amelia County","Amador County","Alpine County","Alpena County","Allendale County","Allen Parish","Allen County","Allegheny County","Alleghany County","Allegany County","Allegan County","Allamakee County","Alger County","Alfalfa County","Alexandria city","Alexander County","Aleutians West Census Area","Aleutians East Borough","Alcorn County","Alcona County","Albemarle County","Albany County","Alamosa County","Alameda County","Alamance County","Alachua County","Aitkin County","Aiken County","Addison County","Adams County","Adair County","Ada County","Accomack County","Acadia Parish","Abbeville County"] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q35", + 'query': """ select + ca_state, + cd_gender, + cd_marital_status, + cd_dep_count, + count(*) cnt1, + {AGGONE}(cd_dep_count) {AGGONE}_cd_dep_count1, + {AGGTWO}(cd_dep_count) {AGGTWO}_cd_dep_count2, + {AGGTHREE}(cd_dep_count) {AGGTHREE}_cd_dep_count3, + cd_dep_employed_count, + count(*) cnt2, + {AGGONE}(cd_dep_employed_count) {AGGONE}_cd_dep_employed_count1, + {AGGTWO}(cd_dep_employed_count) {AGGTWO}_cd_dep_employed_count2, + {AGGTHREE}(cd_dep_employed_count) {AGGTHREE}_cd_dep_employed_count3, + cd_dep_college_count, + count(*) cnt3, + {AGGONE}(cd_dep_college_count) {AGGONE}_cd_dep_college_count1, + {AGGTWO}(cd_dep_college_count) {AGGTWO}_cd_dep_college_count2, + {AGGTHREE}(cd_dep_college_count) {AGGTHREE}_cd_dep_college_count3 + from + customer c,customer_address ca,customer_demographics + where + c.c_current_addr_sk = ca.ca_address_sk and + cd_demo_sk = c.c_current_cdemo_sk and + exists (select * + from store_sales,date_dim + where c.c_customer_sk = ss_customer_sk and + ss_sold_date_sk = d_date_sk and + d_year = {YEAR} and + d_qoy < 4) and + (exists (select * + from web_sales,date_dim + where c.c_customer_sk = ws_bill_customer_sk and + ws_sold_date_sk = d_date_sk and + d_year = {YEAR} and + d_qoy < 4) or + exists (select * + from catalog_sales,date_dim + where c.c_customer_sk = cs_ship_customer_sk and + cs_sold_date_sk = d_date_sk and + d_year = {YEAR} and + d_qoy < 4)) + group by ca_state, + cd_gender, + cd_marital_status, + cd_dep_count, + cd_dep_employed_count, + cd_dep_college_count + order by ca_state is not null, ca_state, + cd_gender is not null, cd_gender, + cd_marital_status is not null, cd_marital_status, + cd_dep_count is not null, cd_dep_count, + cd_dep_employed_count is not null, cd_dep_employed_count, + cd_dep_college_count is not null, cd_dep_college_count + limit 100""", + 'parameter': + { + 
'AGGONE': { + 'type': "list", + 'range': ["sum","min","max","avg","stddev_samp"] + }, + 'AGGTHREE': { + 'type': "list", + 'range': ["sum","min","max","avg","stddev_samp"] + }, + 'AGGTWO': { + 'type': "list", + 'range': ["sum","min","max","avg","stddev_samp"] + }, + 'YEAR': { + 'type': "integer", + 'range': [1999,2002] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q36", + 'query': """ select + sum(ss_net_profit)/sum(ss_ext_sales_price) as gross_margin + ,i_category + ,i_class + ,grouping(i_category)+grouping(i_class) as lochierarchy + ,rank() over ( + partition by grouping(i_category)+grouping(i_class), + case when grouping(i_class) = 0 then i_category end + order by sum(ss_net_profit)/sum(ss_ext_sales_price) asc) as rank_within_parent + from + store_sales + ,date_dim d1 + ,item + ,store + where + d1.d_year = {YEAR} + and d1.d_date_sk = ss_sold_date_sk + and i_item_sk = ss_item_sk + and s_store_sk = ss_store_sk + and s_state in ('{STATE1}','{STATE2}','{STATE3}','{STATE4}','{STATE5}','{STATE6}','{STATE7}','{STATE8}') + group by i_category,i_class with rollup + order by + lochierarchy desc + ,case when lochierarchy = 0 then i_category is not null end + ,case when lochierarchy = 0 then i_category end + ,rank_within_parent + limit 100""", + 'DBMS': { + 'MariaDB': """ SELECT + gross_margin, + i_category, + i_class, + CASE WHEN i_category IS NULL THEN 1 ELSE 0 END + + CASE WHEN i_class IS NULL THEN 1 ELSE 0 END AS lochierarchy, + RANK() OVER ( + PARTITION BY lochierarchy, + CASE WHEN i_class IS NOT NULL THEN i_category END + ORDER BY gross_margin ASC + ) AS rank_within_parent + FROM ( + SELECT + i_category, + i_class, + SUM(ss_net_profit) / SUM(ss_ext_sales_price) AS gross_margin, + CASE WHEN i_category IS NULL THEN 1 ELSE 0 END + + CASE WHEN i_class IS NULL THEN 1 ELSE 0 END AS lochierarchy + FROM + store_sales + JOIN date_dim AS d1 ON d1.d_date_sk = ss_sold_date_sk + JOIN item ON i_item_sk = ss_item_sk + JOIN store ON s_store_sk = ss_store_sk + WHERE + d1.d_year = {YEAR} + AND s_state IN ('{STATE1}', '{STATE2}', '{STATE3}', '{STATE4}', '{STATE5}', '{STATE6}', '{STATE7}', '{STATE8}') + GROUP BY i_category, i_class WITH ROLLUP + ) AS summary + order by + lochierarchy is not null, lochierarchy desc + ,case when lochierarchy = 0 then i_category is not null end + ,case when lochierarchy = 0 then i_category end + ,rank_within_parent + LIMIT 100""", + 'MonetDB': """select * FROM + ( + select + sum(ss_net_profit)/sum(ss_ext_sales_price) as gross_margin + ,i_category + ,i_class + ,grouping(i_category)+grouping(i_class) as lochierarchy + ,rank() over ( + partition by grouping(i_category)+grouping(i_class), + case when grouping(i_class) = 0 then i_category end + order by sum(ss_net_profit)/sum(ss_ext_sales_price) asc) as rank_within_parent + from + store_sales + ,date_dim d1 + ,item + ,store + where + d1.d_year = {YEAR} + and d1.d_date_sk = ss_sold_date_sk + and i_item_sk = ss_item_sk + and s_store_sk = ss_store_sk + and s_state in ('{STATE1}','{STATE2}','{STATE3}','{STATE4}','{STATE5}','{STATE6}','{STATE7}','{STATE8}') + group by rollup(i_category,i_class) + ) tmp + order by + lochierarchy is not null, lochierarchy desc + ,case when lochierarchy = 0 then i_category is not null end + ,case when lochierarchy = 0 then i_category end + ,rank_within_parent + limit 100""", 
+ 'PostgreSQL': """ select + sum(ss_net_profit)/sum(ss_ext_sales_price) as gross_margin + ,i_category + ,i_class + ,grouping(i_category)+grouping(i_class) as lochierarchy + ,rank() over ( + partition by grouping(i_category)+grouping(i_class), + case when grouping(i_class) = 0 then i_category end + order by sum(ss_net_profit)/sum(ss_ext_sales_price) asc) as rank_within_parent + from + store_sales + ,date_dim d1 + ,item + ,store + where + d1.d_year = {YEAR} + and d1.d_date_sk = ss_sold_date_sk + and i_item_sk = ss_item_sk + and s_store_sk = ss_store_sk + and s_state in ('{STATE1}','{STATE2}','{STATE3}','{STATE4}','{STATE5}','{STATE6}','{STATE7}','{STATE8}') + group by rollup(i_category,i_class) + order by + grouping(i_category)+grouping(i_class) is not null, grouping(i_category)+grouping(i_class) desc + ,case when grouping(i_category)+grouping(i_class) = 0 then i_category is not null end + ,case when grouping(i_category)+grouping(i_class) = 0 then i_category end + ,rank_within_parent + limit 100""" + }, + 'parameter': + { + 'STATE': { + 'type': "list", + 'size': 8, + 'range': ["AK","AL","AR","AZ","CA","CO","CT","DC","DE","FL","GA","HI","IA","ID","IL","IN","KS","KY","LA","MA","MD","ME","MI","MN","MO","MS","MT","NC","ND","NE","NH","NJ","NM","NV","NY","OH","OK","OR","PA","RI","SC","SD","TN","TX","UT","VA","VT","WA","WI","WV","WY"] + }, + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q37", + 'query': """ select i_item_id + ,i_item_desc + ,i_current_price + from item, inventory, date_dim, catalog_sales + where i_current_price between {PRICE} and {PRICE} + 30 + and inv_item_sk = i_item_sk + and d_date_sk=inv_date_sk + and d_date between cast('{YEAR}-{MONTH}-01' as date) and (cast('{YEAR}-{MONTH}-01' as date) + interval '60' day) + and i_manufact_id in ({MANUFACT_ID1},{MANUFACT_ID2},{MANUFACT_ID3},{MANUFACT_ID4}) + and inv_quantity_on_hand between 100 and 500 + and cs_item_sk = i_item_sk + group by i_item_id,i_item_desc,i_current_price + order by i_item_id + limit 100""", + 'parameter': + { + 'MANUFACT_ID': { + 'type': "integer", + 'size': 4, + 'range': [667,1000] + }, + 'PRICE': { + 'type': "integer", + 'range': [10,70] + }, + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + }, + 'MONTH': { + 'type': "integer", + 'range': [1,7] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q38", + 'query': """select count(*) as counter from ( + select distinct c_last_name, c_first_name, d_date + from store_sales, date_dim, customer + where store_sales.ss_sold_date_sk = date_dim.d_date_sk + and store_sales.ss_customer_sk = customer.c_customer_sk + and d_month_seq between {DMS} and {DMS} + 11 + intersect + select distinct c_last_name, c_first_name, d_date + from catalog_sales, date_dim, customer + where catalog_sales.cs_sold_date_sk = date_dim.d_date_sk + and catalog_sales.cs_bill_customer_sk = customer.c_customer_sk + and d_month_seq between {DMS} and {DMS} + 11 + intersect + select distinct c_last_name, c_first_name, d_date + from web_sales, date_dim, customer + where 
web_sales.ws_sold_date_sk = date_dim.d_date_sk + and web_sales.ws_bill_customer_sk = customer.c_customer_sk + and d_month_seq between {DMS} and {DMS} + 11 + ) hot_cust + limit 100""", + 'parameter': + { + 'DMS': { + 'type': "integer", + 'range': [1176,1224] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q39a+b", + 'query': ["""with inv as + (select w_warehouse_name,w_warehouse_sk,i_item_sk,d_moy + ,stdev,mean, case mean when 0 then null else stdev/mean end cov + from(select w_warehouse_name,w_warehouse_sk,i_item_sk,d_moy + ,stddev_samp(inv_quantity_on_hand) stdev,avg(inv_quantity_on_hand) mean + from inventory + ,item + ,warehouse + ,date_dim + where inv_item_sk = i_item_sk + and inv_warehouse_sk = w_warehouse_sk + and inv_date_sk = d_date_sk + and d_year ={YEAR} + group by w_warehouse_name,w_warehouse_sk,i_item_sk,d_moy) foo + where case mean when 0 then 0 else stdev/mean end > 1) + select inv1.w_warehouse_sk,inv1.i_item_sk,inv1.d_moy,inv1.mean, inv1.cov + ,inv2.w_warehouse_sk,inv2.i_item_sk,inv2.d_moy,inv2.mean, inv2.cov + from inv inv1,inv inv2 + where inv1.i_item_sk = inv2.i_item_sk + and inv1.w_warehouse_sk = inv2.w_warehouse_sk + and inv1.d_moy={MONTH} + and inv2.d_moy={MONTH}+1 + order by inv1.w_warehouse_sk,inv1.i_item_sk,inv1.d_moy,inv1.mean,inv1.cov + ,inv2.d_moy,inv2.mean, inv2.cov + """, """ with inv as + (select w_warehouse_name,w_warehouse_sk,i_item_sk,d_moy + ,stdev,mean, case mean when 0 then null else stdev/mean end cov + from(select w_warehouse_name,w_warehouse_sk,i_item_sk,d_moy + ,stddev_samp(inv_quantity_on_hand) stdev,avg(inv_quantity_on_hand) mean + from inventory + ,item + ,warehouse + ,date_dim + where inv_item_sk = i_item_sk + and inv_warehouse_sk = w_warehouse_sk + and inv_date_sk = d_date_sk + and d_year ={YEAR} + group by w_warehouse_name,w_warehouse_sk,i_item_sk,d_moy) foo + where case mean when 0 then 0 else stdev/mean end > 1) + select inv1.w_warehouse_sk,inv1.i_item_sk,inv1.d_moy,inv1.mean, inv1.cov + ,inv2.w_warehouse_sk,inv2.i_item_sk,inv2.d_moy,inv2.mean, inv2.cov + from inv inv1,inv inv2 + where inv1.i_item_sk = inv2.i_item_sk + and inv1.w_warehouse_sk = inv2.w_warehouse_sk + and inv1.d_moy={MONTH} + and inv2.d_moy={MONTH}+1 + and inv1.cov > 1.5 + order by inv1.w_warehouse_sk,inv1.i_item_sk,inv1.d_moy,inv1.mean,inv1.cov + ,inv2.d_moy,inv2.mean, inv2.cov + """], + 'parameter': + { + 'MONTH': { + 'type': "integer", + 'range': [1,4] + }, + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q40", + 'query': """ select + w_state + ,i_item_id + ,sum(case when (cast(d_date as date) < cast('{YEAR}-{MONTH}-01' as date)) + then cs_sales_price - coalesce(cr_refunded_cash,0) else 0 end) as sales_before + ,sum(case when (cast(d_date as date) >= cast('{YEAR}-{MONTH}-01' as date)) + then cs_sales_price - coalesce(cr_refunded_cash,0) else 0 end) as sales_after + from + catalog_sales left outer join catalog_returns on + (cs_order_number = cr_order_number + and cs_item_sk = cr_item_sk) + ,warehouse + ,item + ,date_dim + where + i_current_price 
between 0.99 and 1.49 + and i_item_sk = cs_item_sk + and cs_warehouse_sk = w_warehouse_sk + and cs_sold_date_sk = d_date_sk + and d_date between (cast('{YEAR}-{MONTH}-01' as date) - interval '30' day) + and (cast('{YEAR}-{MONTH}-01' as date) + interval '30' day) + group by + w_state,i_item_id + order by w_state,i_item_id + limit 100""", + 'parameter': + { + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + }, + 'MONTH': { + 'type': "integer", + 'range': [2,7] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q41", + 'query': """ select distinct(i_product_name) + from item i1 + where i_manufact_id between {MANUFACT} and {MANUFACT}+40 + and (select count(*) as item_cnt + from item + where (i_manufact = i1.i_manufact and + ((i_category = 'Women' and + (i_color = '{COLOR1}' or i_color = '{COLOR2}') and + (i_units = '{UNIT1}' or i_units = '{UNIT2}') and + (i_size = '{SIZE1}' or i_size = '{SIZE2}') + ) or + (i_category = 'Women' and + (i_color = '{COLOR3}' or i_color = '{COLOR4}') and + (i_units = '{UNIT3}' or i_units = '{UNIT4}') and + (i_size = '{SIZE3}' or i_size = '{SIZE4}') + ) or + (i_category = 'Men' and + (i_color = '{COLOR5}' or i_color = '{COLOR6}') and + (i_units = '{UNIT5}' or i_units = '{UNIT6}') and + (i_size = '{SIZE5}' or i_size = '{SIZE6}') + ) or + (i_category = 'Men' and + (i_color = '{COLOR7}' or i_color = '{COLOR8}') and + (i_units = '{UNIT7}' or i_units = '{UNIT8}') and + (i_size = '{SIZE1}' or i_size = '{SIZE2}') + ))) or + (i_manufact = i1.i_manufact and + ((i_category = 'Women' and + (i_color = '{COLOR9}' or i_color = '{COLOR10}') and + (i_units = '{UNIT9}' or i_units = '{UNIT10}') and + (i_size = '{SIZE1}' or i_size = '{SIZE2}') + ) or + (i_category = 'Women' and + (i_color = '{COLOR11}' or i_color = '{COLOR12}') and + (i_units = '{UNIT11}' or i_units = '{UNIT12}') and + (i_size = '{SIZE3}' or i_size = '{SIZE4}') + ) or + (i_category = 'Men' and + (i_color = '{COLOR13}' or i_color = '{COLOR14}') and + (i_units = '{UNIT13}' or i_units = '{UNIT14}') and + (i_size = '{SIZE5}' or i_size = '{SIZE6}') + ) or + (i_category = 'Men' and + (i_color = '{COLOR15}' or i_color = '{COLOR16}') and + (i_units = '{UNIT15}' or i_units = '{UNIT16}') and + (i_size = '{SIZE1}' or i_size = '{SIZE2}') + )))) > 0 + order by i_product_name + limit 100""", + 'parameter': + { + 'COLOR': { + 'type': "list", + 'size': 16, + 'range': ["bisque","black","blue","blush","chocolate","coral","cream","cyan","firebrick","frosted","gainsboro","ghost","goldenrod","green","grey","honeydew","hot","indian","ivory","khaki","lace","lavender","lawn","lime","linen","maroon","medium","midnight","mint","misty","moccasin","navy","olive","orange","orchid","pale","papaya","peach","peru","pink","plum","powder","puff","purple","red","rose","rosy","royal","saddle","salmon","sandy","seashell","sienna","sky","slate","smoke","snow","spring","steel","tan","thistle","tomato","turquoise","violet","wheat","white","yellow"] + }, + 'MANUFACT': { + 'type': "integer", + 'range': [667,1000] + }, + 'SIZE': { + 'type': "list", + 'size': 6, + 'range': ["N/A","petite","extra large","large","medium","small","economy"] + }, + 'UNIT': { + 'type': "list", + 'size': 16, + 'range': 
["Box","Bunch","Bundle","Carton","Case","Cup","Dozen","Dram","Each","Gram","Gross","Lb","N/A","Ounce","Oz","Pallet","Pound","Tbl","Ton","Tsp","Unknown"] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q42", + 'query': """ select dt.d_year + ,item.i_category_id + ,item.i_category + ,sum(ss_ext_sales_price) as sum_sales_price + from date_dim dt + ,store_sales + ,item + where dt.d_date_sk = store_sales.ss_sold_date_sk + and store_sales.ss_item_sk = item.i_item_sk + and item.i_manager_id = 1 + and dt.d_moy={MONTH} + and dt.d_year={YEAR} + group by dt.d_year + ,item.i_category_id + ,item.i_category + order by sum(ss_ext_sales_price) desc,dt.d_year + ,item.i_category_id + ,item.i_category + limit 100 """, + 'parameter': + { + 'MONTH': { + 'type': "integer", + 'range': [11,12] + }, + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q43", + 'query': """ select s_store_name, s_store_id, + sum(case when (d_day_name='Sunday') then ss_sales_price else null end) sun_sales, + sum(case when (d_day_name='Monday') then ss_sales_price else null end) mon_sales, + sum(case when (d_day_name='Tuesday') then ss_sales_price else null end) tue_sales, + sum(case when (d_day_name='Wednesday') then ss_sales_price else null end) wed_sales, + sum(case when (d_day_name='Thursday') then ss_sales_price else null end) thu_sales, + sum(case when (d_day_name='Friday') then ss_sales_price else null end) fri_sales, + sum(case when (d_day_name='Saturday') then ss_sales_price else null end) sat_sales + from date_dim, store_sales, store + where d_date_sk = ss_sold_date_sk and + s_store_sk = ss_store_sk and + s_gmt_offset = {GMT} and + d_year = {YEAR} + group by s_store_name, s_store_id + order by s_store_name, s_store_id,sun_sales,mon_sales,tue_sales,wed_sales,thu_sales,fri_sales,sat_sales + limit 100""", + 'parameter': + { + 'GMT': { + 'type': "list", + 'range': ["-10.00","-9.00","-8.00","-7.00","-6.00","-5.00"] + }, + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q44", + 'query': """ select asceding.rnk, i1.i_product_name best_performing, i2.i_product_name worst_performing + from(select * + from (select item_sk,rank() over (order by rank_col asc) rnk + from (select ss_item_sk item_sk,avg(ss_net_profit) rank_col + from store_sales ss1 + where ss_store_sk = {STORE} + group by ss_item_sk + having avg(ss_net_profit) > 0.9*(select avg(ss_net_profit) rank_col + from store_sales + where ss_store_sk = {STORE} + and {NULLCOLSS} is null + group by ss_store_sk))V1)V11 + where rnk < 11) asceding, + (select * + from (select item_sk,rank() over (order by rank_col desc) rnk + from (select ss_item_sk item_sk,avg(ss_net_profit) rank_col + from store_sales ss1 + where ss_store_sk = {STORE} + group by ss_item_sk + having 
avg(ss_net_profit) > 0.9*(select avg(ss_net_profit) rank_col + from store_sales + where ss_store_sk = {STORE} + and {NULLCOLSS} is null + group by ss_store_sk))V2)V21 + where rnk < 11) descending, + item i1, + item i2 + where asceding.rnk = descending.rnk + and i1.i_item_sk=asceding.item_sk + and i2.i_item_sk=descending.item_sk + order by asceding.rnk + limit 100""", + 'parameter': + { + 'NULLCOLSS': { + 'type': "list", + 'range': ["ss_customer_sk","ss_cdemo_sk","ss_hdemo_sk","ss_addr_sk","ss_promo_sk"] + }, + 'STORE': { + 'type': "integer", + 'range': [1,12] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q45", + 'query': """ select ca_zip, {GBOBC}, sum(ws_sales_price) as sum_sales_prices + from web_sales, customer, customer_address, date_dim, item + where ws_bill_customer_sk = c_customer_sk + and c_current_addr_sk = ca_address_sk + and ws_item_sk = i_item_sk + and ( substr(ca_zip,1,5) in ('85669', '86197','88274','83405','86475', '85392', '85460', '80348', '81792') + or + i_item_id in (select i_item_id + from item + where i_item_sk in (2, 3, 5, 7, 11, 13, 17, 19, 23, 29) + ) + ) + and ws_sold_date_sk = d_date_sk + and d_qoy = {QOY} and d_year = {YEAR} + group by ca_zip, {GBOBC} + order by ca_zip, {GBOBC} + limit 100""", + 'parameter': + { + 'GBOBC': { + 'type': "list", + 'range': ["ca_city","ca_county","ca_state"] + }, + 'QOY': { + 'type': "integer", + 'range': [1,2] + }, + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q46", + 'query': """ select c_last_name + ,c_first_name + ,ca_city + ,bought_city + ,ss_ticket_number + ,amt,profit + from + (select ss_ticket_number + ,ss_customer_sk + ,ca_city bought_city + ,sum(ss_coupon_amt) amt + ,sum(ss_net_profit) profit + from store_sales,date_dim,store,household_demographics,customer_address + where store_sales.ss_sold_date_sk = date_dim.d_date_sk + and store_sales.ss_store_sk = store.s_store_sk + and store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk + and store_sales.ss_addr_sk = customer_address.ca_address_sk + and (household_demographics.hd_dep_count = {DEPCNT} or + household_demographics.hd_vehicle_count= {VEHCNT}) + and date_dim.d_dow in (6,0) + and date_dim.d_year in ({YEAR},{YEAR}+1,{YEAR}+2) + and store.s_city in ('{CITY1}','{CITY2}','{CITY3}','{CITY4}','{CITY5}') + group by ss_ticket_number,ss_customer_sk,ss_addr_sk,ca_city) dn,customer,customer_address current_addr + where ss_customer_sk = c_customer_sk + and customer.c_current_addr_sk = current_addr.ca_address_sk + and current_addr.ca_city <> bought_city + order by c_last_name + ,c_first_name + ,ca_city + ,bought_city + ,ss_ticket_number + limit 100""", + 'parameter': + { + 'DEPCNT': { + 'type': "integer", + 'range': [0,9] + }, + 'VEHCNT': { + 'type': "integer", + 'range': [-1,4] + }, + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + }, + 'CITY': { + 'type': "list", + 'size': 5, + 'range': 
["Aberdeen","Acme","Adams","Adrian","Afton","Albany","Allentown","Allison","Alma","Alpha","Altamont","Amherst","Amity","Andover","Antioch","Appleton","Arcadia","Arcola","Argyle","Arlington","Armstrong","Arthur","Ashland","Ashley","Ashton","Athens","Avery","Avoca","Avon","Bailey","Baker","Barnes","Bartlett","Bath","Bay View","Bayside","Bayview","Bear Creek","Beech Grove","Beechwood","Belfast","Belleview","Belleville","Belmont","Bennett","Benton","Berea","Berlin","Bethany","Bethel","Bethesda","Bethlehem","Big Creek","Birmingham","Blaine","Blair","Blanchard","Bloomingdale","Blue Springs","Bolton","Boyd","Bradford","Bradley","Brandon","Brentwood","Bridgeport","Bristol","Brooklyn","Brooks","Brookville","Brookwood","Brownsville","Brunswick","Bryant","Buckhorn","Buckingham","Buena Vista","Buffalo","Bunker Hill","Burns","Burton","Butler","Byron","Caldwell","Caledonia","Calhoun","California","Cambridge","Camden","Camelot","Canaan","Carlisle","Carlton","Carpenter","Carter","Carthage","Cedar","Cedar Creek","Cedar Grove","Cedar Hill","Center","Center Point","Centerville","Chapel Hill","Charleston","Chatham","Chelsea","Cherokee","Cherry Grove","Cherry Valley","Chester","Chestnut Hill","Chestnut Ridge","Church Hill","Clearview","Clearwater","Clifford","Clifton","Climax","Clinton","Clyde","Coldwater","Colfax","Collinsville","Colonial Heights","Columbia","Columbus","Concord","Conway","Cooper","Cordova","Corinth","Cottonwood","Country Club Estates","Crawford","Crescent","Creston","Crestview","Crossroads","Crystal","Crystal Springs","Cuba","Cumberland","Cunningham","Curtis","Dale","Dallas","Darlington","Decatur","Deer Park","Deerfield","Delmar","Delta","Denmark","Denver","Derby","Dewey","Dover","Doyle","Duncan","Dunlap","Easton","Eastwood","Echo","Edgewater","Edgewood","Edwards","Egypt","Elba","Elgin","Elizabeth","Elkton","Ellisville","Ellsworth","Elm Grove","Elmwood","Empire","Enon","Enterprise","Eureka","Evans","Evansville","Evergreen","Fair Oaks","Fairbanks","Fairfax","Fairfield","Fairmont","Fairmount","Fairview","Farmersville","Farmington","Fayetteville","Ferguson","Ferndale","Fernwood","Fillmore","Fisher","Five Forks","Five Points","Flat Rock","Flatwoods","Flint","Flint Hill","Florence","Floyd","Forest","Forest Hills","Forest Park","Forestville","Foster","Four Points","Fowler","Fox","Frankfort","Franklin","Freedom","Freeman","Freeport","Fremont","Frenchtown","Friendship","Frogtown","Fulton","Galena","Gardner","Garfield","Garrison","Gary","Georgetown","Gilbert","Gilmore","Gladstone","Glencoe","Glendale","Glenville","Glenwood","Globe","Golden","Good Hope","Goshen","Grandview","Granite","Grant","Gravel Hill","Gray","Green Acres","Green Hill","Green Valley","Greenbrier","Greendale","Greenfield","Greenville","Greenwood","Griffin","Guilford","Gum Springs","Guthrie","Hamburg","Hamilton","Hampton","Hardy","Harmon","Harmony","Harper","Harris","Harrisburg","Hartland","Harvey","Hastings","Hawthorne","Hazelwood","Helena","Henry","Hidden Valley","Highland","Highland Park","Hillcrest","Hillsboro","Hillsdale","Hillside","Hilltop","Holiday Hills","Holland","Hollywood","Hopewell","Horton","Houston","Howell","Hubbard","Hunter","Huntington","Huntsville","Hurricane","Hyde Park","Indian Village","Ingleside","Jackson","Jacksonville","Jamestown","Jenkins","Jericho","Jerome","Jimtown","Johnson","Johnsonville","Johnstown","Jones","Jordan","Kelly","Kensington","Kent","Kimball","King","Kingston","Kirkland","Knollwood","La Grange","Lake Forest","Lake 
View","Lakeland","Lakeside","Lakeview","Lakeville","Lakewood","Lamont","Lancaster","Langdon","Laurel","Lawrence","Lawrenceville","Lebanon","Lee","Leesburg","Leesville","Leland","Lenox","Leon","Lewis","Lewisburg","Lewisville","Liberty","Lincoln","Linden","Lisbon","Little River","Littleton","Lodi","Lone Oak","Lone Pine","Lone Star","Long Branch","Longwood","Louisville","Lucas","Ludlow","Lynn","Macedonia","Macon","Manchester","Mansfield","Maple Grove","Maple Hill","Mapleton","Marietta","Marion","Marshall","Martin","Martinsville","Mason","Maxwell","Mayfield","Maywood","Meadowbrook","Mechanicsburg","Middletown","Midway","Milan","Milford","Millbrook","Milltown","Millwood","Milo","Mineral Springs","Monroe","Montague","Montezuma","Monticello","Montpelier","Montrose","Moore","Morgan","Morgantown","Morris","Morton","Mount Olive","Mount Pleasant","Mount Tabor","Mount Vernon","Mount Zion","Mountain View","Murphy","Murray","Nashville","Nebo","Needmore","New Boston","New Hope","New Salem","New Town","Newark","Newburg","Newport","Newton","Newtown","Nichols","Northwood","Norton","Norwood","Nottingham","Oak Grove","Oak Hill","Oak Ridge","Oakdale","Oakland","Oakley","Oakwood","Omega","Oneida","Orange","Owens","Page","Palmyra","Paradise","Parker","Parkwood","Patterson","Paxton","Payne","Peoria","Perkins","Perry","Peru","Philadelphia","Phillips","Phoenix","Pierce","Pine Grove","Pine Hill","Pine Ridge","Pine Valley","Pinecrest","Pineville","Piney Grove","Pinhook","Pioneer","Pisgah","Plainview","Plainville","Pleasant Grove","Pleasant Hill","Pleasant Valley","Point Pleasant","Pomona","Poplar Grove","Poplar Springs","Post Oak","Powell","Preston","Price","Proctor","Prospect","Prosperity","Providence","Pulaski","Pumpkin Center","Quincy","Randolph","Rankin","Raymond","Red Bank","Red Hill","Red Oak","Red Rock","Redland","Reno","Riceville","Richardson","Richfield","Richland","Richmond","Richville","Ridgeville","Ridgeway","Ridgewood","Riley","River Oaks","Riverdale","Riverside","Riverview","Roberts","Rochester","Rock Hill","Rock Springs","Rockford","Rockland","Rockwood","Rocky Point","Rolling Hills","Roscoe","Rose Hill","Rosebud","Roseville","Rosewood","Rossville","Roxbury","Roy","Royal","Ruby","Ruth","Rutland","Ryan","Saint Clair","Saint George","Saint James","Saint John","Saint Johns","Saint Paul","Salem","San Jose","Sand Hill","Sanford","Saratoga","Sardis","Sawyer","Scotland","Scottsville","Selma","Seneca","Shady Grove","Shamrock","Shannon","Sharon","Shaw","Shawnee","Sheffield","Shelby","Sheridan","Sherman","Sherwood Forest","Shiloh","Shore Acres","Sidney","Siloam","Silver City","Silver Creek","Silver Springs","Simpson","Slabtown","Sleepy Hollow","Smith","Smyrna","Snug Harbor","Somerset","Somerville","Spencer","Spring Grove","Spring Hill","Spring Lake","Spring Valley","Springdale","Springfield","Springhill","Springtown","Springville","Stafford","Star","State Line","Sterling","Stewart","Stony Point","Stratford","Stringtown","Sugar Hill","Sullivan","Sulphur Springs","Summerfield","Summerville","Summit","Sumner","Sunnyside","Sunrise","Sunset Beach","Sunshine","Superior","Sutton","Sycamore","Tabor","Taft","Tanglewood","Texas","The Meadows","Thomas","Thompson","Thompsonville","Three Forks","Tipton","Tracy","Tremont","Trenton","Trinity","Turner","Twin Oaks","Tyler","Tyrone","Union","Union City","Union Hill","Unionville","Unity","Utica","Valley View","Vance","Verona","Victoria","Vienna","Vista","Wakefield","Wallace","Walnut","Walnut Grove","Walton","Ward","Warwick","Washington 
Heights","Waterford","Waterloo","Waterville","Watkins","Wayland","Wayne","Webb","Welcome","Weldon","Wesley","West End","West Liberty","West Point","Westfield","Westgate","Westminster","Weston","Westport","Westville","Westwood","Wheatland","Whispering Pines","White City","White Hall","White Oak","White Plains","White Rock","Whitesville","Whitney","Wildwood","Willard","Williams","Williamsburg","Williamsville","Willis","Willow","Wilson","Wilton","Winchester","Winfield","Winona","Winslow","Wolf Creek","Woodbine","Woodbury","Woodcrest","Woodland","Woodland Hills","Woodland Park","Woodlawn","Woodrow","Woodruff","Woodside","Woodstock","Woodville","Wright","Wyoming","York","Yorktown","Youngstown"] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q47", + 'query': """with v1 as( + select i_category, i_brand, + s_store_name, s_company_name, + d_year, d_moy, + sum(ss_sales_price) sum_sales, + avg(sum(ss_sales_price)) over + (partition by i_category, i_brand, + s_store_name, s_company_name, d_year) + avg_monthly_sales, + rank() over + (partition by i_category, i_brand, + s_store_name, s_company_name + order by d_year, d_moy) rn + from item, store_sales, date_dim, store + where ss_item_sk = i_item_sk and + ss_sold_date_sk = d_date_sk and + ss_store_sk = s_store_sk and + ( + d_year = {YEAR} or + ( d_year = {YEAR}-1 and d_moy =12) or + ( d_year = {YEAR}+1 and d_moy =1) + ) + group by i_category, i_brand, + s_store_name, s_company_name, + d_year, d_moy), + v2 as( + select {SELECTONE} + {SELECTTWO} + ,v1.avg_monthly_sales + ,v1.sum_sales, v1_lag.sum_sales psum, v1_lead.sum_sales nsum + from v1, v1 v1_lag, v1 v1_lead + where v1.i_category = v1_lag.i_category and + v1.i_category = v1_lead.i_category and + v1.i_brand = v1_lag.i_brand and + v1.i_brand = v1_lead.i_brand and + v1.s_store_name = v1_lag.s_store_name and + v1.s_store_name = v1_lead.s_store_name and + v1.s_company_name = v1_lag.s_company_name and + v1.s_company_name = v1_lead.s_company_name and + v1.rn = v1_lag.rn + 1 and + v1.rn = v1_lead.rn - 1) + select * + from v2 + where d_year = {YEAR} and + avg_monthly_sales > 0 and + case when avg_monthly_sales > 0 then abs(sum_sales - avg_monthly_sales) / avg_monthly_sales else null end > 0.1 + order by sum_sales - avg_monthly_sales, {ORDERBY} + limit 100""", + 'parameter': + { + 'ORDERBY': { + 'type': "list", + 'range': ["avg_monthly_sales","sum_sales","psum","nsum"] + }, + 'SELECTONE': { + 'type': "list", + 'range': ["v1.i_category","v1.i_brand","v1.i_category","v1.i_brand, v1.s_store_name","v1.s_company_name","v1.s_store_name, v1.s_company_name","v1.i_category, v1.i_brand, v1.s_store_name, v1.s_company_name"] + }, + 'SELECTTWO': { + 'type': "list", + 'range': [",v1.d_year",",v1.d_year, v1.d_moy"] + }, + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q48", + 'query': """select sum(ss_quantity) as sum_quantity + from store_sales, store, customer_demographics, customer_address, date_dim + where s_store_sk = ss_store_sk + and ss_sold_date_sk = d_date_sk and d_year = {YEAR} + and + ( + 
( + cd_demo_sk = ss_cdemo_sk + and + cd_marital_status = '{MS1}' + and + cd_education_status = '{ES1}' + and + ss_sales_price between 100.00 and 150.00 + ) + or + ( + cd_demo_sk = ss_cdemo_sk + and + cd_marital_status = '{MS2}' + and + cd_education_status = '{ES2}' + and + ss_sales_price between 50.00 and 100.00 + ) + or + ( + cd_demo_sk = ss_cdemo_sk + and + cd_marital_status = '{MS3}' + and + cd_education_status = '{ES3}' + and + ss_sales_price between 150.00 and 200.00 + ) + ) + and + ( + ( + ss_addr_sk = ca_address_sk + and + ca_country = 'United States' + and + ca_state in ('{STATE1}', '{STATE2}', '{STATE3}') + and ss_net_profit between 0 and 2000 + ) + or + (ss_addr_sk = ca_address_sk + and + ca_country = 'United States' + and + ca_state in ('{STATE4}', '{STATE5}', '{STATE6}') + and ss_net_profit between 150 and 3000 + ) + or + (ss_addr_sk = ca_address_sk + and + ca_country = 'United States' + and + ca_state in ('{STATE7}', '{STATE8}', '{STATE9}') + and ss_net_profit between 50 and 25000 + ) + ) + """, + 'parameter': + { + 'ES': { + 'type': "list", + 'size': 3, + 'range': ["Primary","Secondary","College","2 yr Degree","4 yr Degree", "Advanced Degree","Unknown"] + }, + 'MS': { + 'type': "list", + 'size': 3, + 'range': ["M","S","D","W","U"] + }, + 'STATE': { + 'type': "list", + 'size': 9, + 'range': ["AK","AL","AR","AZ","CA","CO","CT","DC","DE","FL","GA","HI","IA","ID","IL","IN","KS","KY","LA","MA","MD","ME","MI","MN","MO","MS","MT","NC","ND","NE","NH","NJ","NM","NV","NY","OH","OK","OR","PA","RI","SC","SD","TN","TX","UT","VA","VT","WA","WI","WV","WY"] + }, + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q49", + 'query': """(select channel, item, return_ratio, return_rank, currency_rank from + (select + 'web' as channel + ,web.item + ,web.return_ratio + ,web.return_rank + ,web.currency_rank + from ( + select + item + ,return_ratio + ,currency_ratio + ,rank() over (order by return_ratio) as return_rank + ,rank() over (order by currency_ratio) as currency_rank + from + ( select ws.ws_item_sk as item + ,(cast(sum(coalesce(wr.wr_return_quantity,0)) as decimal(15,4))/ + cast(sum(coalesce(ws.ws_quantity,0)) as decimal(15,4) )) as return_ratio + ,(cast(sum(coalesce(wr.wr_return_amt,0)) as decimal(15,4))/ + cast(sum(coalesce(ws.ws_net_paid,0)) as decimal(15,4) )) as currency_ratio + from + web_sales ws left outer join web_returns wr + on (ws.ws_order_number = wr.wr_order_number and + ws.ws_item_sk = wr.wr_item_sk) + ,date_dim + where + wr.wr_return_amt > 10000 + and ws.ws_net_profit > 1 + and ws.ws_net_paid > 0 + and ws.ws_quantity > 0 + and ws_sold_date_sk = d_date_sk + and d_year = {YEAR} + and d_moy = {MONTH} + group by ws.ws_item_sk + ) in_web + ) web + where + ( + web.return_rank <= 10 + or + web.currency_rank <= 10 + )) x + union + (select + 'catalog' as channel + ,catalog.item + ,catalog.return_ratio + ,catalog.return_rank + ,catalog.currency_rank + from ( + select + item + ,return_ratio + ,currency_ratio + ,rank() over (order by return_ratio) as return_rank + ,rank() over (order by currency_ratio) as currency_rank + from + ( select + cs.cs_item_sk as item + ,(cast(sum(coalesce(cr.cr_return_quantity,0)) as decimal(15,4))/ + cast(sum(coalesce(cs.cs_quantity,0)) as decimal(15,4) )) as return_ratio + 
,(cast(sum(coalesce(cr.cr_return_amount,0)) as decimal(15,4))/ + cast(sum(coalesce(cs.cs_net_paid,0)) as decimal(15,4) )) as currency_ratio + from + catalog_sales cs left outer join catalog_returns cr + on (cs.cs_order_number = cr.cr_order_number and + cs.cs_item_sk = cr.cr_item_sk) + ,date_dim + where + cr.cr_return_amount > 10000 + and cs.cs_net_profit > 1 + and cs.cs_net_paid > 0 + and cs.cs_quantity > 0 + and cs_sold_date_sk = d_date_sk + and d_year = {YEAR} + and d_moy = {MONTH} + group by cs.cs_item_sk + ) in_cat + ) catalog + where + ( + catalog.return_rank <= 10 + or + catalog.currency_rank <=10 + )) + union + (select + 'store' as channel + ,store.item + ,store.return_ratio + ,store.return_rank + ,store.currency_rank + from ( + select + item + ,return_ratio + ,currency_ratio + ,rank() over (order by return_ratio) as return_rank + ,rank() over (order by currency_ratio) as currency_rank + from + ( select sts.ss_item_sk as item + ,(cast(sum(coalesce(sr.sr_return_quantity,0)) as decimal(15,4))/cast(sum(coalesce(sts.ss_quantity,0)) as decimal(15,4) )) as return_ratio + ,(cast(sum(coalesce(sr.sr_return_amt,0)) as decimal(15,4))/cast(sum(coalesce(sts.ss_net_paid,0)) as decimal(15,4) )) as currency_ratio + from + store_sales sts left outer join store_returns sr + on (sts.ss_ticket_number = sr.sr_ticket_number and sts.ss_item_sk = sr.sr_item_sk) + ,date_dim + where + sr.sr_return_amt > 10000 + and sts.ss_net_profit > 1 + and sts.ss_net_paid > 0 + and sts.ss_quantity > 0 + and ss_sold_date_sk = d_date_sk + and d_year = {YEAR} + and d_moy = {MONTH} + group by sts.ss_item_sk + ) in_store + ) store + where ( + store.return_rank <= 10 + or + store.currency_rank <= 10 + ) + )) + order by 1,4,5,2 + limit 100""", + 'parameter': + { + 'MONTH': { + 'type': "integer", + 'range': [11,12] + }, + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q50", + 'query': """ select + s_store_name + ,s_company_id + ,s_street_number + ,s_street_name + ,s_street_type + ,s_suite_number + ,s_city + ,s_county + ,s_state + ,s_zip + ,sum(case when (sr_returned_date_sk - ss_sold_date_sk <= 30 ) then 1 else 0 end) as "30 days" + ,sum(case when (sr_returned_date_sk - ss_sold_date_sk > 30) and + (sr_returned_date_sk - ss_sold_date_sk <= 60) then 1 else 0 end ) as "31-60 days" + ,sum(case when (sr_returned_date_sk - ss_sold_date_sk > 60) and + (sr_returned_date_sk - ss_sold_date_sk <= 90) then 1 else 0 end) as "61-90 days" + ,sum(case when (sr_returned_date_sk - ss_sold_date_sk > 90) and + (sr_returned_date_sk - ss_sold_date_sk <= 120) then 1 else 0 end) as "91-120 days" + ,sum(case when (sr_returned_date_sk - ss_sold_date_sk > 120) then 1 else 0 end) as ">120 days" + from + store_sales + ,store_returns + ,store + ,date_dim d1 + ,date_dim d2 + where + d2.d_year = {YEAR} + and d2.d_moy = {MONTH} + and ss_ticket_number = sr_ticket_number + and ss_item_sk = sr_item_sk + and ss_sold_date_sk = d1.d_date_sk + and sr_returned_date_sk = d2.d_date_sk + and ss_customer_sk = sr_customer_sk + and ss_store_sk = s_store_sk + group by + s_store_name + ,s_company_id + ,s_street_number + ,s_street_name + ,s_street_type + ,s_suite_number + ,s_city + ,s_county + ,s_state + ,s_zip + order by s_store_name + ,s_company_id + ,s_street_number + ,s_street_name + 
,s_street_type + ,s_suite_number + ,s_city + ,s_county + ,s_state + ,s_zip + limit 100""", + 'parameter': + { + 'MONTH': { + 'type': "integer", + 'range': [8,10] + }, + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q51", + 'query': """WITH web_v1 as ( + select + ws_item_sk item_sk, d_date, + sum(sum(ws_sales_price)) + over (partition by ws_item_sk order by d_date rows between unbounded preceding and current row) cume_sales + from web_sales + ,date_dim + where ws_sold_date_sk=d_date_sk + and d_month_seq between {DMS} and {DMS}+11 + and ws_item_sk is not NULL + group by ws_item_sk, d_date), + store_v1 as ( + select + ss_item_sk item_sk, d_date, + sum(sum(ss_sales_price)) + over (partition by ss_item_sk order by d_date rows between unbounded preceding and current row) cume_sales + from store_sales + ,date_dim + where ss_sold_date_sk=d_date_sk + and d_month_seq between {DMS} and {DMS}+11 + and ss_item_sk is not NULL + group by ss_item_sk, d_date) + select * + from (select item_sk + ,d_date + ,web_sales + ,store_sales + ,max(web_sales) + over (partition by item_sk order by d_date rows between unbounded preceding and current row) web_cumulative + ,max(store_sales) + over (partition by item_sk order by d_date rows between unbounded preceding and current row) store_cumulative + from (select case when web.item_sk is not null then web.item_sk else store.item_sk end item_sk + ,case when web.d_date is not null then web.d_date else store.d_date end d_date + ,web.cume_sales web_sales + ,store.cume_sales store_sales + from web_v1 web full outer join store_v1 store on (web.item_sk = store.item_sk + and web.d_date = store.d_date) + )x )y + where web_cumulative > store_cumulative + order by item_sk + ,d_date + limit 100""", + 'DBMS': { + 'MySQL': """WITH web_v1 AS ( + SELECT + ws_item_sk AS item_sk, + d_date, + SUM(SUM(ws_sales_price)) OVER (PARTITION BY ws_item_sk ORDER BY d_date ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS cume_sales + FROM web_sales, date_dim + WHERE ws_sold_date_sk = d_date_sk + AND d_month_seq BETWEEN {DMS} AND {DMS} + 11 + AND ws_item_sk IS NOT NULL + GROUP BY ws_item_sk, d_date +), +store_v1 AS ( + SELECT + ss_item_sk AS item_sk, + d_date, + SUM(SUM(ss_sales_price)) OVER (PARTITION BY ss_item_sk ORDER BY d_date ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS cume_sales + FROM store_sales, date_dim + WHERE ss_sold_date_sk = d_date_sk + AND d_month_seq BETWEEN {DMS} AND {DMS} + 11 + AND ss_item_sk IS NOT NULL + GROUP BY ss_item_sk, d_date +), +combined_sales AS ( + SELECT + item_sk, + d_date, + MAX(CASE WHEN source = 'web' THEN cume_sales END) AS web_sales, + MAX(CASE WHEN source = 'store' THEN cume_sales END) AS store_sales + FROM ( + SELECT item_sk, d_date, cume_sales, 'web' AS source + FROM web_v1 + UNION ALL + SELECT item_sk, d_date, cume_sales, 'store' AS source + FROM store_v1 + ) AS combined + GROUP BY item_sk, d_date +), +cumulative AS ( + SELECT + item_sk, + d_date, + web_sales, + store_sales, + MAX(web_sales) OVER (PARTITION BY item_sk ORDER BY d_date ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS web_cumulative, + MAX(store_sales) OVER (PARTITION BY item_sk ORDER BY d_date ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS store_cumulative + FROM combined_sales +) 
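+-- MySQL note: FULL OUTER JOIN is not supported by MySQL, so this variant combines web_v1 and store_v1 via UNION ALL and conditional aggregation (combined_sales) before computing the running maxima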
+SELECT * FROM cumulative +WHERE web_cumulative > store_cumulative +ORDER BY item_sk, d_date +LIMIT 100""" + }, + 'parameter': + { + 'DMS': { + 'type': "integer", + 'range': [1176,1224] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q52", + 'query': """ select dt.d_year + ,item.i_brand_id brand_id + ,item.i_brand brand + ,sum(ss_ext_sales_price) ext_price + from date_dim dt + ,store_sales + ,item + where dt.d_date_sk = store_sales.ss_sold_date_sk + and store_sales.ss_item_sk = item.i_item_sk + and item.i_manager_id = 1 + and dt.d_moy={MONTH} + and dt.d_year={YEAR} + group by dt.d_year + ,item.i_brand + ,item.i_brand_id + order by dt.d_year + ,ext_price desc + ,brand_id + limit 100 """, + 'parameter': + { + 'MONTH': { + 'type': "integer", + 'range': [11,12] + }, + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q53", + 'query': """select * from + (select i_manufact_id, + sum(ss_sales_price) sum_sales, + avg(sum(ss_sales_price)) over (partition by i_manufact_id) avg_quarterly_sales + from item, store_sales, date_dim, store + where ss_item_sk = i_item_sk and + ss_sold_date_sk = d_date_sk and + ss_store_sk = s_store_sk and + d_month_seq in ({DMS},{DMS}+1,{DMS}+2,{DMS}+3,{DMS}+4,{DMS}+5,{DMS}+6,{DMS}+7,{DMS}+8,{DMS}+9,{DMS}+10,{DMS}+11) and + ((i_category in ('Books','Children','Electronics') and + i_class in ('personal','portable','reference','self-help') and + i_brand in ('scholaramalgamalg #14','scholaramalgamalg #7', + 'exportiunivamalg #9','scholaramalgamalg #9')) + or(i_category in ('Women','Music','Men') and + i_class in ('accessories','classical','fragrances','pants') and + i_brand in ('amalgimporto #1','edu packscholar #1','exportiimporto #1', + 'importoamalg #1'))) + group by i_manufact_id, d_qoy ) tmp1 + where case when avg_quarterly_sales > 0 + then abs (sum_sales - avg_quarterly_sales)/ avg_quarterly_sales + else null end > 0.1 + order by avg_quarterly_sales, + sum_sales, + i_manufact_id + limit 100""", + 'parameter': + { + 'DMS': { + 'type': "integer", + 'range': [1176,1224] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q54", + 'query': """with my_customers as ( + select distinct c_customer_sk + , c_current_addr_sk + from + ( select cs_sold_date_sk sold_date_sk, + cs_bill_customer_sk customer_sk, + cs_item_sk item_sk + from catalog_sales + union all + select ws_sold_date_sk sold_date_sk, + ws_bill_customer_sk customer_sk, + ws_item_sk item_sk + from web_sales + ) cs_or_ws_sales, + item, + date_dim, + customer + where sold_date_sk = d_date_sk + and item_sk = i_item_sk + and i_category = '{CATEGORY}' + and i_class = '{CLASS}' + and c_customer_sk = cs_or_ws_sales.customer_sk + and d_moy = {MONTH} + and d_year = {YEAR} + ) + , my_revenue as ( + select c_customer_sk, + sum(ss_ext_sales_price) as revenue + from my_customers, 
+ store_sales, + customer_address, + store, + date_dim + where c_current_addr_sk = ca_address_sk + and ca_county = s_county + and ca_state = s_state + and ss_sold_date_sk = d_date_sk + and c_customer_sk = ss_customer_sk + and d_month_seq between (select distinct d_month_seq+1 + from date_dim where d_year = {YEAR} and d_moy = {MONTH}) + and (select distinct d_month_seq+3 + from date_dim where d_year = {YEAR} and d_moy = {MONTH}) + group by c_customer_sk + ) + , segments as + (select cast((revenue/50) as int) as segment + from my_revenue + ) + select segment, count(*) as num_customers, segment*50 as segment_base + from segments + group by segment + order by segment, num_customers + limit 100""", + 'DBMS': { + 'MySQL': """with my_customers as ( + select distinct c_customer_sk + , c_current_addr_sk + from + ( select cs_sold_date_sk sold_date_sk, + cs_bill_customer_sk customer_sk, + cs_item_sk item_sk + from catalog_sales + union all + select ws_sold_date_sk sold_date_sk, + ws_bill_customer_sk customer_sk, + ws_item_sk item_sk + from web_sales + ) cs_or_ws_sales, + item, + date_dim, + customer + where sold_date_sk = d_date_sk + and item_sk = i_item_sk + and i_category = '{CATEGORY}' + and i_class = '{CLASS}' + and c_customer_sk = cs_or_ws_sales.customer_sk + and d_moy = {MONTH} + and d_year = {YEAR} + ) + , my_revenue as ( + select c_customer_sk, + sum(ss_ext_sales_price) as revenue + from my_customers, + store_sales, + customer_address, + store, + date_dim + where c_current_addr_sk = ca_address_sk + and ca_county = s_county + and ca_state = s_state + and ss_sold_date_sk = d_date_sk + and c_customer_sk = ss_customer_sk + and d_month_seq between (select distinct d_month_seq+1 + from date_dim where d_year = {YEAR} and d_moy = {MONTH}) + and (select distinct d_month_seq+3 + from date_dim where d_year = {YEAR} and d_moy = {MONTH}) + group by c_customer_sk + ) + , segments as + (select cast((revenue/50) as signed) as segment + from my_revenue + ) + select segment, count(*) as num_customers, segment*50 as segment_base + from segments + group by segment + order by segment, num_customers + limit 100""", + }, + 'parameter': + { + 'CATEGORY': { + 'type': "list", + 'range': ["Books","Children","Electronics","Home","Jewelry","Men","Music","Shoes","Sports","Women"] + }, + 'MONTH': { + 'type': "integer", + 'range': [1,7] + }, + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + }, + 'CLASS': { + 'type': "list", + 'range': ["accent","accessories","archery","arts","athletic","athletic shoes","audio","automotive","baseball","basketball","bathroom","bedding","birdal","blinds/shades","bracelets","business","camcorders","cameras","camping","classical","computers","consignment","cooking","costume","country","curtains/drapes","custom","decor","diamonds","disk drives","dresses","dvd/vcr players","earings","entertainments","estate","fiction","fishing","fitness","flatware","football","fragrances","furniture","glassware","gold","golf","guns","history","hockey","home repair","infants","jewelry boxes","karoke","kids","lighting","loose stones","maternity","mattresses","memory","mens","mens watch","monitors","musical","mystery","newborn","optics","outdoor","paint","pants","parenting","pendants","personal","pools","pop","portable","reference","rings","rock","romance","rugs","sailing","scanners","school-uniforms","science","self-help","semi-precious","shirts","sports","sports-apparel","stereo","swimwear","tables","televisions","tennis","toddlers","travel","wallpaper","wireless","womens","womens watch"] + }
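+ # class names such as 'birdal', 'earings' and 'karoke' are intentional: they match the spellings the TPC-DS data generator writes into the item table; the MySQL variant above uses cast(... as signed) because MySQL's CAST() does not accept INT as a target type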
+ }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q55", + 'query': """ select i_brand_id brand_id, i_brand brand, + sum(ss_ext_sales_price) ext_price + from date_dim, store_sales, item + where d_date_sk = ss_sold_date_sk + and ss_item_sk = i_item_sk + and i_manager_id={MANAGER} + and d_moy={MONTH} + and d_year={YEAR} + group by i_brand, i_brand_id + order by ext_price desc, i_brand_id + limit 100 """, + 'parameter': + { + 'MANAGER': { + 'type': "integer", + 'range': [1,100] + }, + 'MONTH': { + 'type': "integer", + 'range': [11,12] + }, + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q56", + 'query': """with ss as ( + select i_item_id,sum(ss_ext_sales_price) total_sales + from + store_sales, + date_dim, + customer_address, + item + where i_item_id in (select + i_item_id + from item + where i_color in ('{COLOR1}','{COLOR2}','{COLOR3}')) + and ss_item_sk = i_item_sk + and ss_sold_date_sk = d_date_sk + and d_year = {YEAR} + and d_moy = {MONTH} + and ss_addr_sk = ca_address_sk + and ca_gmt_offset = {GMT} + group by i_item_id), + cs as ( + select i_item_id,sum(cs_ext_sales_price) total_sales + from + catalog_sales, + date_dim, + customer_address, + item + where + i_item_id in (select + i_item_id + from item + where i_color in ('{COLOR1}','{COLOR2}','{COLOR3}')) + and cs_item_sk = i_item_sk + and cs_sold_date_sk = d_date_sk + and d_year = {YEAR} + and d_moy = {MONTH} + and cs_bill_addr_sk = ca_address_sk + and ca_gmt_offset = {GMT} + group by i_item_id), + ws as ( + select i_item_id,sum(ws_ext_sales_price) total_sales + from + web_sales, + date_dim, + customer_address, + item + where + i_item_id in (select + i_item_id + from item + where i_color in ('{COLOR1}','{COLOR2}','{COLOR3}')) + and ws_item_sk = i_item_sk + and ws_sold_date_sk = d_date_sk + and d_year = {YEAR} + and d_moy = {MONTH} + and ws_bill_addr_sk = ca_address_sk + and ca_gmt_offset = {GMT} + group by i_item_id) + select i_item_id ,sum(total_sales) total_sales + from (select * from ss + union all + select * from cs + union all + select * from ws) tmp1 + group by i_item_id + order by total_sales, + i_item_id + limit 100""", + 'parameter': + { + 'COLOR': { + 'type': "list", + 'size': 3, + 'range': ["bisque","black","blue","blush","chocolate","coral","cream","cyan","firebrick","frosted","gainsboro","ghost","goldenrod","green","grey","honeydew","hot","indian","ivory","khaki","lace","lavender","lawn","lime","linen","maroon","medium","midnight","mint","misty","moccasin","navy","olive","orange","orchid","pale","papaya","peach","peru","pink","plum","powder","puff","purple","red","rose","rosy","royal","saddle","salmon","sandy","seashell","sienna","sky","slate","smoke","snow","spring","steel","tan","thistle","tomato","turquoise","violet","wheat","white","yellow"] + }, + 'GMT': { + 'type': "list", + 'range': ["-10.00","-9.00","-8.00","-7.00","-6.00","-5.00"] + }, + 'MONTH': { + 'type': "integer", + 'range': [1,7] + }, + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + } + }, + 'active': True, + 'numWarmup': 0, + 
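# numWarmup/numCooldown control how many leading/trailing runs the benchmarker treats as warmup/cooldown and excludes from the timing statistics; 0 keeps every run +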
'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q57", + 'query': """with v1 as( + select i_category, i_brand, + cc_name, + d_year, d_moy, + sum(cs_sales_price) sum_sales, + avg(sum(cs_sales_price)) over + (partition by i_category, i_brand, + cc_name, d_year) + avg_monthly_sales, + rank() over + (partition by i_category, i_brand, + cc_name + order by d_year, d_moy) rn + from item, catalog_sales, date_dim, call_center + where cs_item_sk = i_item_sk and + cs_sold_date_sk = d_date_sk and + cc_call_center_sk= cs_call_center_sk and + ( + d_year = {YEAR} or + ( d_year = {YEAR}-1 and d_moy =12) or + ( d_year = {YEAR}+1 and d_moy =1) + ) + group by i_category, i_brand, + cc_name , d_year, d_moy), + v2 as( + select {SELECTONE} + {SELECTTWO} + ,v1.avg_monthly_sales + ,v1.sum_sales, v1_lag.sum_sales psum, v1_lead.sum_sales nsum + from v1, v1 v1_lag, v1 v1_lead + where v1.i_category = v1_lag.i_category and + v1.i_category = v1_lead.i_category and + v1.i_brand = v1_lag.i_brand and + v1.i_brand = v1_lead.i_brand and + v1. cc_name = v1_lag. cc_name and + v1. cc_name = v1_lead. cc_name and + v1.rn = v1_lag.rn + 1 and + v1.rn = v1_lead.rn - 1) + select * + from v2 + where d_year = {YEAR} and + avg_monthly_sales > 0 and + case when avg_monthly_sales > 0 then abs(sum_sales - avg_monthly_sales) / avg_monthly_sales else null end > 0.1 + order by sum_sales - avg_monthly_sales, {ORDERBY} + limit 100""", + 'parameter': + { + 'ORDERBY': { + 'type': "list", + 'range': ["avg_monthly_sales","sum_sales","psum","nsum"] + }, + 'SELECTONE': { + 'type': "list", + 'range': ["v1.i_category","v1.i_brand","v1.i_category, v1.i_brand","v1.cc_name","v1.i_category, v1.i_brand, v1.cc_name"] + }, + 'SELECTTWO': { + 'type': "list", + 'range': [",v1.d_year",",v1.d_year, v1.d_moy"] + }, + 'YEAR': { + 'type': "integer", + 'range': [1999,2001] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q58", + 'query': """with ss_items as + (select i_item_id item_id + ,sum(ss_ext_sales_price) ss_item_rev + from store_sales + ,item + ,date_dim + where ss_item_sk = i_item_sk + and d_date in (select d_date + from date_dim + where d_week_seq = (select d_week_seq + from date_dim + where d_date = '{YEAR}-{MONTH}-{DAY}')) + and ss_sold_date_sk = d_date_sk + group by i_item_id), + cs_items as + (select i_item_id item_id + ,sum(cs_ext_sales_price) cs_item_rev + from catalog_sales + ,item + ,date_dim + where cs_item_sk = i_item_sk + and d_date in (select d_date + from date_dim + where d_week_seq = (select d_week_seq + from date_dim + where d_date = '{YEAR}-{MONTH}-{DAY}')) + and cs_sold_date_sk = d_date_sk + group by i_item_id), + ws_items as + (select i_item_id item_id + ,sum(ws_ext_sales_price) ws_item_rev + from web_sales + ,item + ,date_dim + where ws_item_sk = i_item_sk + and d_date in (select d_date + from date_dim + where d_week_seq =(select d_week_seq + from date_dim + where d_date = '{YEAR}-{MONTH}-{DAY}')) + and ws_sold_date_sk = d_date_sk + group by i_item_id) + select ss_items.item_id + ,ss_item_rev + ,ss_item_rev/((ss_item_rev+cs_item_rev+ws_item_rev)/3) * 100 ss_dev + ,cs_item_rev + 
,cs_item_rev/((ss_item_rev+cs_item_rev+ws_item_rev)/3) * 100 cs_dev + ,ws_item_rev + ,ws_item_rev/((ss_item_rev+cs_item_rev+ws_item_rev)/3) * 100 ws_dev + ,(ss_item_rev+cs_item_rev+ws_item_rev)/3 average + from ss_items,cs_items,ws_items + where ss_items.item_id=cs_items.item_id + and ss_items.item_id=ws_items.item_id + and ss_item_rev between 0.9 * cs_item_rev and 1.1 * cs_item_rev + and ss_item_rev between 0.9 * ws_item_rev and 1.1 * ws_item_rev + and cs_item_rev between 0.9 * ss_item_rev and 1.1 * ss_item_rev + and cs_item_rev between 0.9 * ws_item_rev and 1.1 * ws_item_rev + and ws_item_rev between 0.9 * ss_item_rev and 1.1 * ss_item_rev + and ws_item_rev between 0.9 * cs_item_rev and 1.1 * cs_item_rev + order by item_id + ,ss_item_rev + limit 100""", + 'parameter': + { + 'DAY': { + 'type': "integer", + 'range': [1,24] + }, + 'MONTH': { + 'type': "integer", + 'range': [1,7] + }, + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q59", + 'query': """with wss as + (select d_week_seq, + ss_store_sk, + sum(case when (d_day_name='Sunday') then ss_sales_price else null end) sun_sales, + sum(case when (d_day_name='Monday') then ss_sales_price else null end) mon_sales, + sum(case when (d_day_name='Tuesday') then ss_sales_price else null end) tue_sales, + sum(case when (d_day_name='Wednesday') then ss_sales_price else null end) wed_sales, + sum(case when (d_day_name='Thursday') then ss_sales_price else null end) thu_sales, + sum(case when (d_day_name='Friday') then ss_sales_price else null end) fri_sales, + sum(case when (d_day_name='Saturday') then ss_sales_price else null end) sat_sales + from store_sales,date_dim + where d_date_sk = ss_sold_date_sk + group by d_week_seq,ss_store_sk + ) + select s_store_name1,s_store_id1,d_week_seq1 + ,sun_sales1/sun_sales2 sun_sales, mon_sales1/mon_sales2 mon_sales + ,tue_sales1/tue_sales2 tue_sales, wed_sales1/wed_sales2 wed_sales + ,thu_sales1/thu_sales2 thu_sales + ,fri_sales1/fri_sales2 fri_sales, sat_sales1/sat_sales2 sat_sales + from + (select s_store_name s_store_name1,wss.d_week_seq d_week_seq1 + ,s_store_id s_store_id1,sun_sales sun_sales1 + ,mon_sales mon_sales1,tue_sales tue_sales1 + ,wed_sales wed_sales1,thu_sales thu_sales1 + ,fri_sales fri_sales1,sat_sales sat_sales1 + from wss,store,date_dim d + where d.d_week_seq = wss.d_week_seq and + ss_store_sk = s_store_sk and + d_month_seq between {DMS} and {DMS} + 11) y, + (select s_store_name s_store_name2,wss.d_week_seq d_week_seq2 + ,s_store_id s_store_id2,sun_sales sun_sales2 + ,mon_sales mon_sales2,tue_sales tue_sales2 + ,wed_sales wed_sales2,thu_sales thu_sales2 + ,fri_sales fri_sales2,sat_sales sat_sales2 + from wss,store,date_dim d + where d.d_week_seq = wss.d_week_seq and + ss_store_sk = s_store_sk and + d_month_seq between {DMS}+ 12 and {DMS} + 23) x + where s_store_id1=s_store_id2 + and d_week_seq1=d_week_seq2-52 + order by s_store_name1,s_store_id1,d_week_seq1 + limit 100""", + 'parameter': + { + 'DMS': { + 'type': "integer", + 'range': [1176,1212] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } 
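+ # Q59 compares each store's weekly sales with the same week one year later, so it reads d_month_seq up to {DMS}+23; hence the DMS upper bound of 1212 instead of the usual 1224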
+ }, + { + 'title': "TPC-DS Q60", + 'query': """with ss as ( + select + i_item_id,sum(ss_ext_sales_price) total_sales + from + store_sales, + date_dim, + customer_address, + item + where + i_item_id in (select + i_item_id + from + item + where i_category in ('{CATEGORY}')) + and ss_item_sk = i_item_sk + and ss_sold_date_sk = d_date_sk + and d_year = {YEAR} + and d_moy = {MONTH} + and ss_addr_sk = ca_address_sk + and ca_gmt_offset = {GMT} + group by i_item_id), + cs as ( + select + i_item_id,sum(cs_ext_sales_price) total_sales + from + catalog_sales, + date_dim, + customer_address, + item + where + i_item_id in (select + i_item_id + from + item + where i_category in ('{CATEGORY}')) + and cs_item_sk = i_item_sk + and cs_sold_date_sk = d_date_sk + and d_year = {YEAR} + and d_moy = {MONTH} + and cs_bill_addr_sk = ca_address_sk + and ca_gmt_offset = {GMT} + group by i_item_id), + ws as ( + select + i_item_id,sum(ws_ext_sales_price) total_sales + from + web_sales, + date_dim, + customer_address, + item + where + i_item_id in (select + i_item_id + from + item + where i_category in ('{CATEGORY}')) + and ws_item_sk = i_item_sk + and ws_sold_date_sk = d_date_sk + and d_year = {YEAR} + and d_moy = {MONTH} + and ws_bill_addr_sk = ca_address_sk + and ca_gmt_offset = {GMT} + group by i_item_id) + select + i_item_id + ,sum(total_sales) total_sales + from (select * from ss + union all + select * from cs + union all + select * from ws) tmp1 + group by i_item_id + order by i_item_id + ,total_sales + limit 100""", + 'parameter': + { + 'CATEGORY': { + 'type': "list", + 'range': ["Children","Men","Music","Jewelry","Shoes"] + }, + 'GMT': { + 'type': "list", + 'range': ["-10.00","-9.00","-8.00","-7.00","-6.00","-5.00"] + }, + 'MONTH': { + 'type': "integer", + 'range': [8,10] + }, + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q61", + 'query': """ select promotions,total,cast(promotions as decimal(15,4))/cast(total as decimal(15,4))*100 prom_per_total + from + (select sum(ss_ext_sales_price) promotions + from store_sales + ,store + ,promotion + ,date_dim + ,customer + ,customer_address + ,item + where ss_sold_date_sk = d_date_sk + and ss_store_sk = s_store_sk + and ss_promo_sk = p_promo_sk + and ss_customer_sk= c_customer_sk + and ca_address_sk = c_current_addr_sk + and ss_item_sk = i_item_sk + and ca_gmt_offset = {GMT} + and i_category = '{CATEGORY}' + and (p_channel_dmail = 'Y' or p_channel_email = 'Y' or p_channel_tv = 'Y') + and s_gmt_offset = {GMT} + and d_year = {YEAR} + and d_moy = {MONTH}) promotional_sales, + (select sum(ss_ext_sales_price) total + from store_sales + ,store + ,date_dim + ,customer + ,customer_address + ,item + where ss_sold_date_sk = d_date_sk + and ss_store_sk = s_store_sk + and ss_customer_sk= c_customer_sk + and ca_address_sk = c_current_addr_sk + and ss_item_sk = i_item_sk + and ca_gmt_offset = {GMT} + and i_category = '{CATEGORY}' + and s_gmt_offset = {GMT} + and d_year = {YEAR} + and d_moy = {MONTH}) all_sales + order by promotions, total + limit 100""", + 'parameter': + { + 'CATEGORY': { + 'type': "list", + 'range': ["Books","Home","Electronics","Jewelry","Sports"] + }, + 'GMT': { + 'type': "list", + 'range': ["-6","-7"] + }, + 'MONTH': { + 'type': "integer", + 'range': [11,12] + }, + 
'YEAR': { + 'type': "integer", + 'range': [1998,2002] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q62", + 'query': """with total as (select + substr(w_warehouse_name,1,20) as warehouse_name + ,sm_type + ,web_name + ,sum(case when (ws_ship_date_sk - ws_sold_date_sk <= 30 ) then 1 else 0 end) as "30 days" + ,sum(case when (ws_ship_date_sk - ws_sold_date_sk > 30) and + (ws_ship_date_sk - ws_sold_date_sk <= 60) then 1 else 0 end ) as "31-60 days" + ,sum(case when (ws_ship_date_sk - ws_sold_date_sk > 60) and + (ws_ship_date_sk - ws_sold_date_sk <= 90) then 1 else 0 end) as "61-90 days" + ,sum(case when (ws_ship_date_sk - ws_sold_date_sk > 90) and + (ws_ship_date_sk - ws_sold_date_sk <= 120) then 1 else 0 end) as "91-120 days" + ,sum(case when (ws_ship_date_sk - ws_sold_date_sk > 120) then 1 else 0 end) as ">120 days" + from + web_sales + ,warehouse + ,ship_mode + ,web_site + ,date_dim + where + d_month_seq between {DMS} and {DMS} + 11 + and ws_ship_date_sk = d_date_sk + and ws_warehouse_sk = w_warehouse_sk + and ws_ship_mode_sk = sm_ship_mode_sk + and ws_web_site_sk = web_site_sk + group by + substr(w_warehouse_name,1,20) + ,sm_type + ,web_name) + select * from total + order by warehouse_name is not null, warehouse_name + ,sm_type + ,web_name + limit 100""", + 'parameter': + { + 'DMS': { + 'type': "integer", + 'range': [1176,1212] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q63", + 'query': """ select * + from (select i_manager_id + ,sum(ss_sales_price) sum_sales + ,avg(sum(ss_sales_price)) over (partition by i_manager_id) avg_monthly_sales + from item + ,store_sales + ,date_dim + ,store + where ss_item_sk = i_item_sk + and ss_sold_date_sk = d_date_sk + and ss_store_sk = s_store_sk + and d_month_seq in ({DMS},{DMS}+1,{DMS}+2,{DMS}+3,{DMS}+4,{DMS}+5,{DMS}+6,{DMS}+7,{DMS}+8,{DMS}+9,{DMS}+10,{DMS}+11) + and (( i_category in ('Books','Children','Electronics') + and i_class in ('personal','portable','reference','self-help') + and i_brand in ('scholaramalgamalg #14','scholaramalgamalg #7', + 'exportiunivamalg #9','scholaramalgamalg #9')) + or( i_category in ('Women','Music','Men') + and i_class in ('accessories','classical','fragrances','pants') + and i_brand in ('amalgimporto #1','edu packscholar #1','exportiimporto #1', + 'importoamalg #1'))) + group by i_manager_id, d_moy) tmp1 + where case when avg_monthly_sales > 0 then abs (sum_sales - avg_monthly_sales) / avg_monthly_sales else null end > 0.1 + order by i_manager_id + ,avg_monthly_sales + ,sum_sales + limit 100""", + 'parameter': + { + 'DMS': { + 'type': "integer", + 'range': [1176,1224] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q64", + 'query': """with cs_ui as + (select cs_item_sk + ,sum(cs_ext_list_price) as sale,sum(cr_refunded_cash+cr_reversed_charge+cr_store_credit) as refund + from catalog_sales + ,catalog_returns + 
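-- cs_ui: catalog items whose list-price sales exceed twice the total cash refunded +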
where cs_item_sk = cr_item_sk + and cs_order_number = cr_order_number + group by cs_item_sk + having sum(cs_ext_list_price)>2*sum(cr_refunded_cash+cr_reversed_charge+cr_store_credit)), + cross_sales as + (select i_product_name product_name + ,i_item_sk item_sk + ,s_store_name store_name + ,s_zip store_zip + ,ad1.ca_street_number b_street_number + ,ad1.ca_street_name b_street_name + ,ad1.ca_city b_city + ,ad1.ca_zip b_zip + ,ad2.ca_street_number c_street_number + ,ad2.ca_street_name c_street_name + ,ad2.ca_city c_city + ,ad2.ca_zip c_zip + ,d1.d_year as syear + ,d2.d_year as fsyear + ,d3.d_year s2year + ,count(*) cnt + ,sum(ss_wholesale_cost) s1 + ,sum(ss_list_price) s2 + ,sum(ss_coupon_amt) s3 + FROM store_sales + ,store_returns + ,cs_ui + ,date_dim d1 + ,date_dim d2 + ,date_dim d3 + ,store + ,customer + ,customer_demographics cd1 + ,customer_demographics cd2 + ,promotion + ,household_demographics hd1 + ,household_demographics hd2 + ,customer_address ad1 + ,customer_address ad2 + ,income_band ib1 + ,income_band ib2 + ,item + WHERE ss_store_sk = s_store_sk AND + ss_sold_date_sk = d1.d_date_sk AND + ss_customer_sk = c_customer_sk AND + ss_cdemo_sk= cd1.cd_demo_sk AND + ss_hdemo_sk = hd1.hd_demo_sk AND + ss_addr_sk = ad1.ca_address_sk and + ss_item_sk = i_item_sk and + ss_item_sk = sr_item_sk and + ss_ticket_number = sr_ticket_number and + ss_item_sk = cs_ui.cs_item_sk and + c_current_cdemo_sk = cd2.cd_demo_sk AND + c_current_hdemo_sk = hd2.hd_demo_sk AND + c_current_addr_sk = ad2.ca_address_sk and + c_first_sales_date_sk = d2.d_date_sk and + c_first_shipto_date_sk = d3.d_date_sk and + ss_promo_sk = p_promo_sk and + hd1.hd_income_band_sk = ib1.ib_income_band_sk and + hd2.hd_income_band_sk = ib2.ib_income_band_sk and + cd1.cd_marital_status <> cd2.cd_marital_status and + i_color in ('{COLOR1}','{COLOR2}','{COLOR3}','{COLOR4}','{COLOR5}','{COLOR6}') and + i_current_price between {PRICE} and {PRICE} + 10 and + i_current_price between {PRICE} + 1 and {PRICE} + 15 + group by i_product_name + ,i_item_sk + ,s_store_name + ,s_zip + ,ad1.ca_street_number + ,ad1.ca_street_name + ,ad1.ca_city + ,ad1.ca_zip + ,ad2.ca_street_number + ,ad2.ca_street_name + ,ad2.ca_city + ,ad2.ca_zip + ,d1.d_year + ,d2.d_year + ,d3.d_year + ) + select cs1.product_name + ,cs1.store_name + ,cs1.store_zip + ,cs1.b_street_number + ,cs1.b_street_name + ,cs1.b_city + ,cs1.b_zip + ,cs1.c_street_number + ,cs1.c_street_name + ,cs1.c_city + ,cs1.c_zip + ,cs1.syear syear1 + ,cs1.cnt cnt1 + ,cs1.s1 as s11 + ,cs1.s2 as s21 + ,cs1.s3 as s31 + ,cs2.s1 as s12 + ,cs2.s2 as s22 + ,cs2.s3 as s32 + ,cs2.syear syear2 + ,cs2.cnt cnt2 + from cross_sales cs1,cross_sales cs2 + where cs1.item_sk=cs2.item_sk and + cs1.syear = {YEAR} and + cs2.syear = {YEAR} + 1 and + cs2.cnt <= cs1.cnt and + cs1.store_name = cs2.store_name and + cs1.store_zip = cs2.store_zip + order by cs1.product_name + ,cs1.store_name + ,cs2.cnt + ,cs1.s1 + ,cs2.s1""", + 'parameter': + { + 'COLOR': { + 'type': "list", + 'size': 6, + 'range': 
["bisque","black","blue","blush","chocolate","coral","cream","cyan","firebrick","frosted","gainsboro","ghost","goldenrod","green","grey","honeydew","hot","indian","ivory","khaki","lace","lavender","lawn","lime","linen","maroon","medium","midnight","mint","misty","moccasin","navy","olive","orange","orchid","pale","papaya","peach","peru","pink","plum","powder","puff","purple","red","rose","rosy","royal","saddle","salmon","sandy","seashell","sienna","sky","slate","smoke","snow","spring","steel","tan","thistle","tomato","turquoise","violet","wheat","white","yellow"] + }, + 'PRICE': { + 'type': "integer", + 'range': [0,85] + }, + 'YEAR': { + 'type': "integer", + 'range': [1999,2001] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q65", + 'query': """ select + s_store_name, + i_item_desc, + sc.revenue, + i_current_price, + i_wholesale_cost, + i_brand + from store, item, + (select ss_store_sk, avg(revenue) as ave + from + (select ss_store_sk, ss_item_sk, + sum(ss_sales_price) as revenue + from store_sales, date_dim + where ss_sold_date_sk = d_date_sk and d_month_seq between {DMS} and {DMS}+11 + group by ss_store_sk, ss_item_sk) sa + group by ss_store_sk) sb, + (select ss_store_sk, ss_item_sk, sum(ss_sales_price) as revenue + from store_sales, date_dim + where ss_sold_date_sk = d_date_sk and d_month_seq between {DMS} and {DMS}+11 + group by ss_store_sk, ss_item_sk) sc + where sb.ss_store_sk = sc.ss_store_sk and + sc.revenue <= 0.1 * sb.ave and + s_store_sk = sc.ss_store_sk and + i_item_sk = sc.ss_item_sk + order by s_store_name is not null, s_store_name, i_item_desc is not null, i_item_desc + limit 100""", + 'parameter': + { + 'DMS': { + 'type': "integer", + 'range': [1176,1224] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q66", + 'query': """ select + w_warehouse_name + ,w_warehouse_sq_ft + ,w_city + ,w_county + ,w_state + ,w_country + ,ship_carriers + ,years + ,sum(jan_sales) as jan_sales + ,sum(feb_sales) as feb_sales + ,sum(mar_sales) as mar_sales + ,sum(apr_sales) as apr_sales + ,sum(may_sales) as may_sales + ,sum(jun_sales) as jun_sales + ,sum(jul_sales) as jul_sales + ,sum(aug_sales) as aug_sales + ,sum(sep_sales) as sep_sales + ,sum(oct_sales) as oct_sales + ,sum(nov_sales) as nov_sales + ,sum(dec_sales) as dec_sales + ,sum(jan_sales/w_warehouse_sq_ft) as jan_sales_per_sq_foot + ,sum(feb_sales/w_warehouse_sq_ft) as feb_sales_per_sq_foot + ,sum(mar_sales/w_warehouse_sq_ft) as mar_sales_per_sq_foot + ,sum(apr_sales/w_warehouse_sq_ft) as apr_sales_per_sq_foot + ,sum(may_sales/w_warehouse_sq_ft) as may_sales_per_sq_foot + ,sum(jun_sales/w_warehouse_sq_ft) as jun_sales_per_sq_foot + ,sum(jul_sales/w_warehouse_sq_ft) as jul_sales_per_sq_foot + ,sum(aug_sales/w_warehouse_sq_ft) as aug_sales_per_sq_foot + ,sum(sep_sales/w_warehouse_sq_ft) as sep_sales_per_sq_foot + ,sum(oct_sales/w_warehouse_sq_ft) as oct_sales_per_sq_foot + ,sum(nov_sales/w_warehouse_sq_ft) as nov_sales_per_sq_foot + ,sum(dec_sales/w_warehouse_sq_ft) as dec_sales_per_sq_foot + ,sum(jan_net) as jan_net + ,sum(feb_net) as feb_net + ,sum(mar_net) as mar_net + 
,sum(apr_net) as apr_net + ,sum(may_net) as may_net + ,sum(jun_net) as jun_net + ,sum(jul_net) as jul_net + ,sum(aug_net) as aug_net + ,sum(sep_net) as sep_net + ,sum(oct_net) as oct_net + ,sum(nov_net) as nov_net + ,sum(dec_net) as dec_net + from ( + select + w_warehouse_name + ,w_warehouse_sq_ft + ,w_city + ,w_county + ,w_state + ,w_country + ,CONCAT('{SMC1}' , ',' , '{SMC2}') as ship_carriers + ,d_year as years + ,sum(case when d_moy = 1 + then {SALESONE}* ws_quantity else 0 end) as jan_sales + ,sum(case when d_moy = 2 + then {SALESONE}* ws_quantity else 0 end) as feb_sales + ,sum(case when d_moy = 3 + then {SALESONE}* ws_quantity else 0 end) as mar_sales + ,sum(case when d_moy = 4 + then {SALESONE}* ws_quantity else 0 end) as apr_sales + ,sum(case when d_moy = 5 + then {SALESONE}* ws_quantity else 0 end) as may_sales + ,sum(case when d_moy = 6 + then {SALESONE}* ws_quantity else 0 end) as jun_sales + ,sum(case when d_moy = 7 + then {SALESONE}* ws_quantity else 0 end) as jul_sales + ,sum(case when d_moy = 8 + then {SALESONE}* ws_quantity else 0 end) as aug_sales + ,sum(case when d_moy = 9 + then {SALESONE}* ws_quantity else 0 end) as sep_sales + ,sum(case when d_moy = 10 + then {SALESONE}* ws_quantity else 0 end) as oct_sales + ,sum(case when d_moy = 11 + then {SALESONE}* ws_quantity else 0 end) as nov_sales + ,sum(case when d_moy = 12 + then {SALESONE}* ws_quantity else 0 end) as dec_sales + ,sum(case when d_moy = 1 + then {NETONE} * ws_quantity else 0 end) as jan_net + ,sum(case when d_moy = 2 + then {NETONE} * ws_quantity else 0 end) as feb_net + ,sum(case when d_moy = 3 + then {NETONE} * ws_quantity else 0 end) as mar_net + ,sum(case when d_moy = 4 + then {NETONE} * ws_quantity else 0 end) as apr_net + ,sum(case when d_moy = 5 + then {NETONE} * ws_quantity else 0 end) as may_net + ,sum(case when d_moy = 6 + then {NETONE} * ws_quantity else 0 end) as jun_net + ,sum(case when d_moy = 7 + then {NETONE} * ws_quantity else 0 end) as jul_net + ,sum(case when d_moy = 8 + then {NETONE} * ws_quantity else 0 end) as aug_net + ,sum(case when d_moy = 9 + then {NETONE} * ws_quantity else 0 end) as sep_net + ,sum(case when d_moy = 10 + then {NETONE} * ws_quantity else 0 end) as oct_net + ,sum(case when d_moy = 11 + then {NETONE} * ws_quantity else 0 end) as nov_net + ,sum(case when d_moy = 12 + then {NETONE} * ws_quantity else 0 end) as dec_net + from + web_sales + ,warehouse + ,date_dim + ,time_dim + ,ship_mode + where + ws_warehouse_sk = w_warehouse_sk + and ws_sold_date_sk = d_date_sk + and ws_sold_time_sk = t_time_sk + and ws_ship_mode_sk = sm_ship_mode_sk + and d_year = {YEAR} + and t_time between {TIMEONE} and {TIMEONE}+28800 + and sm_carrier in ('{SMC1}','{SMC2}') + group by + w_warehouse_name + ,w_warehouse_sq_ft + ,w_city + ,w_county + ,w_state + ,w_country + ,d_year + union all + select + w_warehouse_name + ,w_warehouse_sq_ft + ,w_city + ,w_county + ,w_state + ,w_country + ,CONCAT('{SMC1}', ',', '{SMC2}') as ship_carriers + ,d_year as years + ,sum(case when d_moy = 1 + then {SALESTWO}* cs_quantity else 0 end) as jan_sales + ,sum(case when d_moy = 2 + then {SALESTWO}* cs_quantity else 0 end) as feb_sales + ,sum(case when d_moy = 3 + then {SALESTWO}* cs_quantity else 0 end) as mar_sales + ,sum(case when d_moy = 4 + then {SALESTWO}* cs_quantity else 0 end) as apr_sales + ,sum(case when d_moy = 5 + then {SALESTWO}* cs_quantity else 0 end) as may_sales + ,sum(case when d_moy = 6 + then {SALESTWO}* cs_quantity else 0 end) as jun_sales + ,sum(case when d_moy = 7 + then {SALESTWO}* cs_quantity 
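+                       -- {SALESTWO} is replaced at run time by one of the cs_* price columns listed under 'parameter' below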
else 0 end) as jul_sales + ,sum(case when d_moy = 8 + then {SALESTWO}* cs_quantity else 0 end) as aug_sales + ,sum(case when d_moy = 9 + then {SALESTWO}* cs_quantity else 0 end) as sep_sales + ,sum(case when d_moy = 10 + then {SALESTWO}* cs_quantity else 0 end) as oct_sales + ,sum(case when d_moy = 11 + then {SALESTWO}* cs_quantity else 0 end) as nov_sales + ,sum(case when d_moy = 12 + then {SALESTWO}* cs_quantity else 0 end) as dec_sales + ,sum(case when d_moy = 1 + then {NETTWO} * cs_quantity else 0 end) as jan_net + ,sum(case when d_moy = 2 + then {NETTWO} * cs_quantity else 0 end) as feb_net + ,sum(case when d_moy = 3 + then {NETTWO} * cs_quantity else 0 end) as mar_net + ,sum(case when d_moy = 4 + then {NETTWO} * cs_quantity else 0 end) as apr_net + ,sum(case when d_moy = 5 + then {NETTWO} * cs_quantity else 0 end) as may_net + ,sum(case when d_moy = 6 + then {NETTWO} * cs_quantity else 0 end) as jun_net + ,sum(case when d_moy = 7 + then {NETTWO} * cs_quantity else 0 end) as jul_net + ,sum(case when d_moy = 8 + then {NETTWO} * cs_quantity else 0 end) as aug_net + ,sum(case when d_moy = 9 + then {NETTWO} * cs_quantity else 0 end) as sep_net + ,sum(case when d_moy = 10 + then {NETTWO} * cs_quantity else 0 end) as oct_net + ,sum(case when d_moy = 11 + then {NETTWO} * cs_quantity else 0 end) as nov_net + ,sum(case when d_moy = 12 + then {NETTWO} * cs_quantity else 0 end) as dec_net + from + catalog_sales + ,warehouse + ,date_dim + ,time_dim + ,ship_mode + where + cs_warehouse_sk = w_warehouse_sk + and cs_sold_date_sk = d_date_sk + and cs_sold_time_sk = t_time_sk + and cs_ship_mode_sk = sm_ship_mode_sk + and d_year = {YEAR} + and t_time between {TIMEONE} AND {TIMEONE}+28800 + and sm_carrier in ('{SMC1}','{SMC2}') + group by + w_warehouse_name + ,w_warehouse_sq_ft + ,w_city + ,w_county + ,w_state + ,w_country + ,d_year + ) x + group by + w_warehouse_name + ,w_warehouse_sq_ft + ,w_city + ,w_county + ,w_state + ,w_country + ,ship_carriers + ,years + order by w_warehouse_name + limit 100""", + 'DBMS': { + 'MonetDB': """ select + w_warehouse_name + ,w_warehouse_sq_ft + ,w_city + ,w_county + ,w_state + ,w_country + ,ship_carriers + ,years + ,sum(jan_sales) as jan_sales + ,sum(feb_sales) as feb_sales + ,sum(mar_sales) as mar_sales + ,sum(apr_sales) as apr_sales + ,sum(may_sales) as may_sales + ,sum(jun_sales) as jun_sales + ,sum(jul_sales) as jul_sales + ,sum(aug_sales) as aug_sales + ,sum(sep_sales) as sep_sales + ,sum(oct_sales) as oct_sales + ,sum(nov_sales) as nov_sales + ,sum(dec_sales) as dec_sales + ,sum(jan_sales/w_warehouse_sq_ft) as jan_sales_per_sq_foot + ,sum(feb_sales/w_warehouse_sq_ft) as feb_sales_per_sq_foot + ,sum(mar_sales/w_warehouse_sq_ft) as mar_sales_per_sq_foot + ,sum(apr_sales/w_warehouse_sq_ft) as apr_sales_per_sq_foot + ,sum(may_sales/w_warehouse_sq_ft) as may_sales_per_sq_foot + ,sum(jun_sales/w_warehouse_sq_ft) as jun_sales_per_sq_foot + ,sum(jul_sales/w_warehouse_sq_ft) as jul_sales_per_sq_foot + ,sum(aug_sales/w_warehouse_sq_ft) as aug_sales_per_sq_foot + ,sum(sep_sales/w_warehouse_sq_ft) as sep_sales_per_sq_foot + ,sum(oct_sales/w_warehouse_sq_ft) as oct_sales_per_sq_foot + ,sum(nov_sales/w_warehouse_sq_ft) as nov_sales_per_sq_foot + ,sum(dec_sales/w_warehouse_sq_ft) as dec_sales_per_sq_foot + ,sum(jan_net) as jan_net + ,sum(feb_net) as feb_net + ,sum(mar_net) as mar_net + ,sum(apr_net) as apr_net + ,sum(may_net) as may_net + ,sum(jun_net) as jun_net + ,sum(jul_net) as jul_net + ,sum(aug_net) as aug_net + ,sum(sep_net) as sep_net + ,sum(oct_net) as oct_net + 
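+                       -- MonetDB variant: identical query shape, but string concatenation uses || instead of CONCAT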
,sum(nov_net) as nov_net + ,sum(dec_net) as dec_net + from ( + select + w_warehouse_name + ,w_warehouse_sq_ft + ,w_city + ,w_county + ,w_state + ,w_country + ,'{SMC1}' || ',' || '{SMC2}' as ship_carriers + ,d_year as years + ,sum(case when d_moy = 1 + then {SALESONE}* ws_quantity else 0 end) as jan_sales + ,sum(case when d_moy = 2 + then {SALESONE}* ws_quantity else 0 end) as feb_sales + ,sum(case when d_moy = 3 + then {SALESONE}* ws_quantity else 0 end) as mar_sales + ,sum(case when d_moy = 4 + then {SALESONE}* ws_quantity else 0 end) as apr_sales + ,sum(case when d_moy = 5 + then {SALESONE}* ws_quantity else 0 end) as may_sales + ,sum(case when d_moy = 6 + then {SALESONE}* ws_quantity else 0 end) as jun_sales + ,sum(case when d_moy = 7 + then {SALESONE}* ws_quantity else 0 end) as jul_sales + ,sum(case when d_moy = 8 + then {SALESONE}* ws_quantity else 0 end) as aug_sales + ,sum(case when d_moy = 9 + then {SALESONE}* ws_quantity else 0 end) as sep_sales + ,sum(case when d_moy = 10 + then {SALESONE}* ws_quantity else 0 end) as oct_sales + ,sum(case when d_moy = 11 + then {SALESONE}* ws_quantity else 0 end) as nov_sales + ,sum(case when d_moy = 12 + then {SALESONE}* ws_quantity else 0 end) as dec_sales + ,sum(case when d_moy = 1 + then {NETONE} * ws_quantity else 0 end) as jan_net + ,sum(case when d_moy = 2 + then {NETONE} * ws_quantity else 0 end) as feb_net + ,sum(case when d_moy = 3 + then {NETONE} * ws_quantity else 0 end) as mar_net + ,sum(case when d_moy = 4 + then {NETONE} * ws_quantity else 0 end) as apr_net + ,sum(case when d_moy = 5 + then {NETONE} * ws_quantity else 0 end) as may_net + ,sum(case when d_moy = 6 + then {NETONE} * ws_quantity else 0 end) as jun_net + ,sum(case when d_moy = 7 + then {NETONE} * ws_quantity else 0 end) as jul_net + ,sum(case when d_moy = 8 + then {NETONE} * ws_quantity else 0 end) as aug_net + ,sum(case when d_moy = 9 + then {NETONE} * ws_quantity else 0 end) as sep_net + ,sum(case when d_moy = 10 + then {NETONE} * ws_quantity else 0 end) as oct_net + ,sum(case when d_moy = 11 + then {NETONE} * ws_quantity else 0 end) as nov_net + ,sum(case when d_moy = 12 + then {NETONE} * ws_quantity else 0 end) as dec_net + from + web_sales + ,warehouse + ,date_dim + ,time_dim + ,ship_mode + where + ws_warehouse_sk = w_warehouse_sk + and ws_sold_date_sk = d_date_sk + and ws_sold_time_sk = t_time_sk + and ws_ship_mode_sk = sm_ship_mode_sk + and d_year = {YEAR} + and t_time between {TIMEONE} and {TIMEONE}+28800 + and sm_carrier in ('{SMC1}','{SMC2}') + group by + w_warehouse_name + ,w_warehouse_sq_ft + ,w_city + ,w_county + ,w_state + ,w_country + ,d_year + union all + select + w_warehouse_name + ,w_warehouse_sq_ft + ,w_city + ,w_county + ,w_state + ,w_country + ,'{SMC1}' || ',' || '{SMC2}' as ship_carriers + ,d_year as years + ,sum(case when d_moy = 1 + then {SALESTWO}* cs_quantity else 0 end) as jan_sales + ,sum(case when d_moy = 2 + then {SALESTWO}* cs_quantity else 0 end) as feb_sales + ,sum(case when d_moy = 3 + then {SALESTWO}* cs_quantity else 0 end) as mar_sales + ,sum(case when d_moy = 4 + then {SALESTWO}* cs_quantity else 0 end) as apr_sales + ,sum(case when d_moy = 5 + then {SALESTWO}* cs_quantity else 0 end) as may_sales + ,sum(case when d_moy = 6 + then {SALESTWO}* cs_quantity else 0 end) as jun_sales + ,sum(case when d_moy = 7 + then {SALESTWO}* cs_quantity else 0 end) as jul_sales + ,sum(case when d_moy = 8 + then {SALESTWO}* cs_quantity else 0 end) as aug_sales + ,sum(case when d_moy = 9 + then {SALESTWO}* cs_quantity else 0 end) as sep_sales + 
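+                       -- this catalog_sales branch mirrors the web_sales branch above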
,sum(case when d_moy = 10 + then {SALESTWO}* cs_quantity else 0 end) as oct_sales + ,sum(case when d_moy = 11 + then {SALESTWO}* cs_quantity else 0 end) as nov_sales + ,sum(case when d_moy = 12 + then {SALESTWO}* cs_quantity else 0 end) as dec_sales + ,sum(case when d_moy = 1 + then {NETTWO} * cs_quantity else 0 end) as jan_net + ,sum(case when d_moy = 2 + then {NETTWO} * cs_quantity else 0 end) as feb_net + ,sum(case when d_moy = 3 + then {NETTWO} * cs_quantity else 0 end) as mar_net + ,sum(case when d_moy = 4 + then {NETTWO} * cs_quantity else 0 end) as apr_net + ,sum(case when d_moy = 5 + then {NETTWO} * cs_quantity else 0 end) as may_net + ,sum(case when d_moy = 6 + then {NETTWO} * cs_quantity else 0 end) as jun_net + ,sum(case when d_moy = 7 + then {NETTWO} * cs_quantity else 0 end) as jul_net + ,sum(case when d_moy = 8 + then {NETTWO} * cs_quantity else 0 end) as aug_net + ,sum(case when d_moy = 9 + then {NETTWO} * cs_quantity else 0 end) as sep_net + ,sum(case when d_moy = 10 + then {NETTWO} * cs_quantity else 0 end) as oct_net + ,sum(case when d_moy = 11 + then {NETTWO} * cs_quantity else 0 end) as nov_net + ,sum(case when d_moy = 12 + then {NETTWO} * cs_quantity else 0 end) as dec_net + from + catalog_sales + ,warehouse + ,date_dim + ,time_dim + ,ship_mode + where + cs_warehouse_sk = w_warehouse_sk + and cs_sold_date_sk = d_date_sk + and cs_sold_time_sk = t_time_sk + and cs_ship_mode_sk = sm_ship_mode_sk + and d_year = {YEAR} + and t_time between {TIMEONE} AND {TIMEONE}+28800 + and sm_carrier in ('{SMC1}','{SMC2}') + group by + w_warehouse_name + ,w_warehouse_sq_ft + ,w_city + ,w_county + ,w_state + ,w_country + ,d_year + ) x + group by + w_warehouse_name + ,w_warehouse_sq_ft + ,w_city + ,w_county + ,w_state + ,w_country + ,ship_carriers + ,years + order by w_warehouse_name + limit 100""", + 'PostgreSQL': """ select + w_warehouse_name + ,w_warehouse_sq_ft + ,w_city + ,w_county + ,w_state + ,w_country + ,ship_carriers + ,years + ,sum(jan_sales) as jan_sales + ,sum(feb_sales) as feb_sales + ,sum(mar_sales) as mar_sales + ,sum(apr_sales) as apr_sales + ,sum(may_sales) as may_sales + ,sum(jun_sales) as jun_sales + ,sum(jul_sales) as jul_sales + ,sum(aug_sales) as aug_sales + ,sum(sep_sales) as sep_sales + ,sum(oct_sales) as oct_sales + ,sum(nov_sales) as nov_sales + ,sum(dec_sales) as dec_sales + ,sum(jan_sales/w_warehouse_sq_ft) as jan_sales_per_sq_foot + ,sum(feb_sales/w_warehouse_sq_ft) as feb_sales_per_sq_foot + ,sum(mar_sales/w_warehouse_sq_ft) as mar_sales_per_sq_foot + ,sum(apr_sales/w_warehouse_sq_ft) as apr_sales_per_sq_foot + ,sum(may_sales/w_warehouse_sq_ft) as may_sales_per_sq_foot + ,sum(jun_sales/w_warehouse_sq_ft) as jun_sales_per_sq_foot + ,sum(jul_sales/w_warehouse_sq_ft) as jul_sales_per_sq_foot + ,sum(aug_sales/w_warehouse_sq_ft) as aug_sales_per_sq_foot + ,sum(sep_sales/w_warehouse_sq_ft) as sep_sales_per_sq_foot + ,sum(oct_sales/w_warehouse_sq_ft) as oct_sales_per_sq_foot + ,sum(nov_sales/w_warehouse_sq_ft) as nov_sales_per_sq_foot + ,sum(dec_sales/w_warehouse_sq_ft) as dec_sales_per_sq_foot + ,sum(jan_net) as jan_net + ,sum(feb_net) as feb_net + ,sum(mar_net) as mar_net + ,sum(apr_net) as apr_net + ,sum(may_net) as may_net + ,sum(jun_net) as jun_net + ,sum(jul_net) as jul_net + ,sum(aug_net) as aug_net + ,sum(sep_net) as sep_net + ,sum(oct_net) as oct_net + ,sum(nov_net) as nov_net + ,sum(dec_net) as dec_net + from ( + select + w_warehouse_name + ,w_warehouse_sq_ft + ,w_city + ,w_county + ,w_state + ,w_country + ,'{SMC1}' || ',' || '{SMC2}' as ship_carriers + 
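+                       -- PostgreSQL variant: likewise uses the SQL-standard || concatenation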
,d_year as years + ,sum(case when d_moy = 1 + then {SALESONE}* ws_quantity else 0 end) as jan_sales + ,sum(case when d_moy = 2 + then {SALESONE}* ws_quantity else 0 end) as feb_sales + ,sum(case when d_moy = 3 + then {SALESONE}* ws_quantity else 0 end) as mar_sales + ,sum(case when d_moy = 4 + then {SALESONE}* ws_quantity else 0 end) as apr_sales + ,sum(case when d_moy = 5 + then {SALESONE}* ws_quantity else 0 end) as may_sales + ,sum(case when d_moy = 6 + then {SALESONE}* ws_quantity else 0 end) as jun_sales + ,sum(case when d_moy = 7 + then {SALESONE}* ws_quantity else 0 end) as jul_sales + ,sum(case when d_moy = 8 + then {SALESONE}* ws_quantity else 0 end) as aug_sales + ,sum(case when d_moy = 9 + then {SALESONE}* ws_quantity else 0 end) as sep_sales + ,sum(case when d_moy = 10 + then {SALESONE}* ws_quantity else 0 end) as oct_sales + ,sum(case when d_moy = 11 + then {SALESONE}* ws_quantity else 0 end) as nov_sales + ,sum(case when d_moy = 12 + then {SALESONE}* ws_quantity else 0 end) as dec_sales + ,sum(case when d_moy = 1 + then {NETONE} * ws_quantity else 0 end) as jan_net + ,sum(case when d_moy = 2 + then {NETONE} * ws_quantity else 0 end) as feb_net + ,sum(case when d_moy = 3 + then {NETONE} * ws_quantity else 0 end) as mar_net + ,sum(case when d_moy = 4 + then {NETONE} * ws_quantity else 0 end) as apr_net + ,sum(case when d_moy = 5 + then {NETONE} * ws_quantity else 0 end) as may_net + ,sum(case when d_moy = 6 + then {NETONE} * ws_quantity else 0 end) as jun_net + ,sum(case when d_moy = 7 + then {NETONE} * ws_quantity else 0 end) as jul_net + ,sum(case when d_moy = 8 + then {NETONE} * ws_quantity else 0 end) as aug_net + ,sum(case when d_moy = 9 + then {NETONE} * ws_quantity else 0 end) as sep_net + ,sum(case when d_moy = 10 + then {NETONE} * ws_quantity else 0 end) as oct_net + ,sum(case when d_moy = 11 + then {NETONE} * ws_quantity else 0 end) as nov_net + ,sum(case when d_moy = 12 + then {NETONE} * ws_quantity else 0 end) as dec_net + from + web_sales + ,warehouse + ,date_dim + ,time_dim + ,ship_mode + where + ws_warehouse_sk = w_warehouse_sk + and ws_sold_date_sk = d_date_sk + and ws_sold_time_sk = t_time_sk + and ws_ship_mode_sk = sm_ship_mode_sk + and d_year = {YEAR} + and t_time between {TIMEONE} and {TIMEONE}+28800 + and sm_carrier in ('{SMC1}','{SMC2}') + group by + w_warehouse_name + ,w_warehouse_sq_ft + ,w_city + ,w_county + ,w_state + ,w_country + ,d_year + union all + select + w_warehouse_name + ,w_warehouse_sq_ft + ,w_city + ,w_county + ,w_state + ,w_country + ,'{SMC1}' || ',' || '{SMC2}' as ship_carriers + ,d_year as years + ,sum(case when d_moy = 1 + then {SALESTWO}* cs_quantity else 0 end) as jan_sales + ,sum(case when d_moy = 2 + then {SALESTWO}* cs_quantity else 0 end) as feb_sales + ,sum(case when d_moy = 3 + then {SALESTWO}* cs_quantity else 0 end) as mar_sales + ,sum(case when d_moy = 4 + then {SALESTWO}* cs_quantity else 0 end) as apr_sales + ,sum(case when d_moy = 5 + then {SALESTWO}* cs_quantity else 0 end) as may_sales + ,sum(case when d_moy = 6 + then {SALESTWO}* cs_quantity else 0 end) as jun_sales + ,sum(case when d_moy = 7 + then {SALESTWO}* cs_quantity else 0 end) as jul_sales + ,sum(case when d_moy = 8 + then {SALESTWO}* cs_quantity else 0 end) as aug_sales + ,sum(case when d_moy = 9 + then {SALESTWO}* cs_quantity else 0 end) as sep_sales + ,sum(case when d_moy = 10 + then {SALESTWO}* cs_quantity else 0 end) as oct_sales + ,sum(case when d_moy = 11 + then {SALESTWO}* cs_quantity else 0 end) as nov_sales + ,sum(case when d_moy = 12 + then {SALESTWO}* 
cs_quantity else 0 end) as dec_sales + ,sum(case when d_moy = 1 + then {NETTWO} * cs_quantity else 0 end) as jan_net + ,sum(case when d_moy = 2 + then {NETTWO} * cs_quantity else 0 end) as feb_net + ,sum(case when d_moy = 3 + then {NETTWO} * cs_quantity else 0 end) as mar_net + ,sum(case when d_moy = 4 + then {NETTWO} * cs_quantity else 0 end) as apr_net + ,sum(case when d_moy = 5 + then {NETTWO} * cs_quantity else 0 end) as may_net + ,sum(case when d_moy = 6 + then {NETTWO} * cs_quantity else 0 end) as jun_net + ,sum(case when d_moy = 7 + then {NETTWO} * cs_quantity else 0 end) as jul_net + ,sum(case when d_moy = 8 + then {NETTWO} * cs_quantity else 0 end) as aug_net + ,sum(case when d_moy = 9 + then {NETTWO} * cs_quantity else 0 end) as sep_net + ,sum(case when d_moy = 10 + then {NETTWO} * cs_quantity else 0 end) as oct_net + ,sum(case when d_moy = 11 + then {NETTWO} * cs_quantity else 0 end) as nov_net + ,sum(case when d_moy = 12 + then {NETTWO} * cs_quantity else 0 end) as dec_net + from + catalog_sales + ,warehouse + ,date_dim + ,time_dim + ,ship_mode + where + cs_warehouse_sk = w_warehouse_sk + and cs_sold_date_sk = d_date_sk + and cs_sold_time_sk = t_time_sk + and cs_ship_mode_sk = sm_ship_mode_sk + and d_year = {YEAR} + and t_time between {TIMEONE} AND {TIMEONE}+28800 + and sm_carrier in ('{SMC1}','{SMC2}') + group by + w_warehouse_name + ,w_warehouse_sq_ft + ,w_city + ,w_county + ,w_state + ,w_country + ,d_year + ) x + group by + w_warehouse_name + ,w_warehouse_sq_ft + ,w_city + ,w_county + ,w_state + ,w_country + ,ship_carriers + ,years + order by w_warehouse_name + limit 100""", + }, + 'parameter': + { + 'NETONE': { + 'type': "list", + 'range': ["ws_net_paid","ws_net_paid_inc_tax","ws_net_paid_inc_ship","ws_net_paid_inc_ship_tax","ws_net_profit"] + }, + 'NETTWO': { + 'type': "list", + 'range': ["cs_net_paid","cs_net_paid_inc_tax","cs_net_paid_inc_ship","cs_net_paid_inc_ship_tax","cs_net_profit"] + }, + 'SALESONE': { + 'type': "list", + 'range': ["ws_sales_price","ws_ext_sales_price","ws_ext_list_price"] + }, + 'SALESTWO': { + 'type': "list", + 'range': ["cs_sales_price","cs_ext_sales_price","cs_ext_list_price"] + }, + 'SMC': { + 'type': "list", + 'size': 2, + 'range': ["AIRBORNE","ALLIANCE","BARIAN","BOXBUNDLES","DHL","DIAMOND","FEDEX","GERMA","GREAT","EASTERN","HARMSTORF","LATVIAN","MSC","ORIENTAL","PRIVATECARRIER","RUPEKSA","TBS","UPS","USPS","ZHOU","ZOUROS"] + }, + 'TIMEONE': { + 'type': "integer", + 'range': [1,57597] + }, + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q67", + 'query': """ select * + from (select i_category + ,i_class + ,i_brand + ,i_product_name + ,d_year + ,d_qoy + ,d_moy + ,s_store_id + ,sumsales + ,rank() over (partition by i_category order by sumsales desc) rk + from (select i_category + ,i_class + ,i_brand + ,i_product_name + ,d_year + ,d_qoy + ,d_moy + ,s_store_id + ,sum(coalesce(ss_sales_price*ss_quantity,0)) sumsales + from store_sales + ,date_dim + ,store + ,item + where ss_sold_date_sk=d_date_sk + and ss_item_sk=i_item_sk + and ss_store_sk = s_store_sk + and d_month_seq between {DMS} and {DMS}+11 + group by i_category, i_class, i_brand, i_product_name, d_year, d_qoy, d_moy,s_store_id with rollup )dw1) dw2 + where rk <= 100 + order by i_category is not 
null, i_category + ,i_class is not null, i_class + ,i_brand is not null, i_brand + ,i_product_name is not null, i_product_name + ,d_year is not null, d_year + ,d_qoy + ,d_moy + ,s_store_id + ,sumsales + ,rk + limit 100""", + 'DBMS': { + 'MonetDB': + """ select * + from (select i_category + ,i_class + ,i_brand + ,i_product_name + ,d_year + ,d_qoy + ,d_moy + ,s_store_id + ,sumsales + ,rank() over (partition by i_category order by sumsales desc) rk + from (select i_category + ,i_class + ,i_brand + ,i_product_name + ,d_year + ,d_qoy + ,d_moy + ,s_store_id + ,sum(coalesce(ss_sales_price*ss_quantity,0)) sumsales + from store_sales + ,date_dim + ,store + ,item + where ss_sold_date_sk=d_date_sk + and ss_item_sk=i_item_sk + and ss_store_sk = s_store_sk + and d_month_seq between {DMS} and {DMS}+11 + group by rollup(i_category, i_class, i_brand, i_product_name, d_year, d_qoy, d_moy,s_store_id) )dw1) dw2 + where rk <= 100 + order by i_category is not null, i_category + ,i_class is not null, i_class + ,i_brand is not null, i_brand + ,i_product_name is not null, i_product_name + ,d_year is not null, d_year + ,d_qoy + ,d_moy + ,s_store_id + ,sumsales + ,rk + limit 100""", + 'PostgreSQL': + """ select * + from (select i_category + ,i_class + ,i_brand + ,i_product_name + ,d_year + ,d_qoy + ,d_moy + ,s_store_id + ,sumsales + ,rank() over (partition by i_category order by sumsales desc) rk + from (select i_category + ,i_class + ,i_brand + ,i_product_name + ,d_year + ,d_qoy + ,d_moy + ,s_store_id + ,sum(coalesce(ss_sales_price*ss_quantity,0)) sumsales + from store_sales + ,date_dim + ,store + ,item + where ss_sold_date_sk=d_date_sk + and ss_item_sk=i_item_sk + and ss_store_sk = s_store_sk + and d_month_seq between {DMS} and {DMS}+11 + group by rollup(i_category, i_class, i_brand, i_product_name, d_year, d_qoy, d_moy,s_store_id) )dw1) dw2 + where rk <= 100 + order by i_category is not null, i_category + ,i_class is not null, i_class + ,i_brand is not null, i_brand + ,i_product_name is not null, i_product_name + ,d_year is not null, d_year + ,d_qoy + ,d_moy + ,s_store_id + ,sumsales + ,rk + limit 100""", + }, + 'parameter': + { + 'DMS': { + 'type': "integer", + 'range': [1176,1224] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q68", + 'query': """ select c_last_name + ,c_first_name + ,ca_city + ,bought_city + ,ss_ticket_number + ,extended_price + ,extended_tax + ,list_price + from (select ss_ticket_number + ,ss_customer_sk + ,ca_city bought_city + ,sum(ss_ext_sales_price) extended_price + ,sum(ss_ext_list_price) list_price + ,sum(ss_ext_tax) extended_tax + from store_sales + ,date_dim + ,store + ,household_demographics + ,customer_address + where store_sales.ss_sold_date_sk = date_dim.d_date_sk + and store_sales.ss_store_sk = store.s_store_sk + and store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk + and store_sales.ss_addr_sk = customer_address.ca_address_sk + and date_dim.d_dom between 1 and 2 + and (household_demographics.hd_dep_count = {DEPCNT} or + household_demographics.hd_vehicle_count= {VEHCNT}) + and date_dim.d_year in ({YEAR},{YEAR}+1,{YEAR}+2) + and store.s_city in ('{CITY1}','{CITY2}') + group by ss_ticket_number + ,ss_customer_sk + ,ss_addr_sk,ca_city) dn + ,customer + ,customer_address current_addr + where ss_customer_sk = c_customer_sk + and 
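+                       -- join back to customer and customer_address to keep only buyers whose current city differs from bought_city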
customer.c_current_addr_sk = current_addr.ca_address_sk + and current_addr.ca_city <> bought_city + order by c_last_name + ,ss_ticket_number + limit 100""", + 'parameter': + { + 'CITY': { + 'type': "list", + 'size': 2, + 'range': ["Aberdeen","Acme","Adams","Adrian","Afton","Albany","Allentown","Allison","Alma","Alpha","Altamont","Amherst","Amity","Andover","Antioch","Appleton","Arcadia","Arcola","Argyle","Arlington","Armstrong","Arthur","Ashland","Ashley","Ashton","Athens","Avery","Avoca","Avon","Bailey","Baker","Barnes","Bartlett","Bath","Bay View","Bayside","Bayview","Bear Creek","Beech Grove","Beechwood","Belfast","Belleview","Belleville","Belmont","Bennett","Benton","Berea","Berlin","Bethany","Bethel","Bethesda","Bethlehem","Big Creek","Birmingham","Blaine","Blair","Blanchard","Bloomingdale","Blue Springs","Bolton","Boyd","Bradford","Bradley","Brandon","Brentwood","Bridgeport","Bristol","Brooklyn","Brooks","Brookville","Brookwood","Brownsville","Brunswick","Bryant","Buckhorn","Buckingham","Buena Vista","Buffalo","Bunker Hill","Burns","Burton","Butler","Byron","Caldwell","Caledonia","Calhoun","California","Cambridge","Camden","Camelot","Canaan","Carlisle","Carlton","Carpenter","Carter","Carthage","Cedar","Cedar Creek","Cedar Grove","Cedar Hill","Center","Center Point","Centerville","Chapel Hill","Charleston","Chatham","Chelsea","Cherokee","Cherry Grove","Cherry Valley","Chester","Chestnut Hill","Chestnut Ridge","Church Hill","Clearview","Clearwater","Clifford","Clifton","Climax","Clinton","Clyde","Coldwater","Colfax","Collinsville","Colonial Heights","Columbia","Columbus","Concord","Conway","Cooper","Cordova","Corinth","Cottonwood","Country Club Estates","Crawford","Crescent","Creston","Crestview","Crossroads","Crystal","Crystal Springs","Cuba","Cumberland","Cunningham","Curtis","Dale","Dallas","Darlington","Decatur","Deer Park","Deerfield","Delmar","Delta","Denmark","Denver","Derby","Dewey","Dover","Doyle","Duncan","Dunlap","Easton","Eastwood","Echo","Edgewater","Edgewood","Edwards","Egypt","Elba","Elgin","Elizabeth","Elkton","Ellisville","Ellsworth","Elm Grove","Elmwood","Empire","Enon","Enterprise","Eureka","Evans","Evansville","Evergreen","Fair Oaks","Fairbanks","Fairfax","Fairfield","Fairmont","Fairmount","Fairview","Farmersville","Farmington","Fayetteville","Ferguson","Ferndale","Fernwood","Fillmore","Fisher","Five Forks","Five Points","Flat Rock","Flatwoods","Flint","Flint Hill","Florence","Floyd","Forest","Forest Hills","Forest Park","Forestville","Foster","Four Points","Fowler","Fox","Frankfort","Franklin","Freedom","Freeman","Freeport","Fremont","Frenchtown","Friendship","Frogtown","Fulton","Galena","Gardner","Garfield","Garrison","Gary","Georgetown","Gilbert","Gilmore","Gladstone","Glencoe","Glendale","Glenville","Glenwood","Globe","Golden","Good Hope","Goshen","Grandview","Granite","Grant","Gravel Hill","Gray","Green Acres","Green Hill","Green Valley","Greenbrier","Greendale","Greenfield","Greenville","Greenwood","Griffin","Guilford","Gum Springs","Guthrie","Hamburg","Hamilton","Hampton","Hardy","Harmon","Harmony","Harper","Harris","Harrisburg","Hartland","Harvey","Hastings","Hawthorne","Hazelwood","Helena","Henry","Hidden Valley","Highland","Highland Park","Hillcrest","Hillsboro","Hillsdale","Hillside","Hilltop","Holiday Hills","Holland","Hollywood","Hopewell","Horton","Houston","Howell","Hubbard","Hunter","Huntington","Huntsville","Hurricane","Hyde Park","Indian 
Village","Ingleside","Jackson","Jacksonville","Jamestown","Jenkins","Jericho","Jerome","Jimtown","Johnson","Johnsonville","Johnstown","Jones","Jordan","Kelly","Kensington","Kent","Kimball","King","Kingston","Kirkland","Knollwood","La Grange","Lake Forest","Lake View","Lakeland","Lakeside","Lakeview","Lakeville","Lakewood","Lamont","Lancaster","Langdon","Laurel","Lawrence","Lawrenceville","Lebanon","Lee","Leesburg","Leesville","Leland","Lenox","Leon","Lewis","Lewisburg","Lewisville","Liberty","Lincoln","Linden","Lisbon","Little River","Littleton","Lodi","Lone Oak","Lone Pine","Lone Star","Long Branch","Longwood","Louisville","Lucas","Ludlow","Lynn","Macedonia","Macon","Manchester","Mansfield","Maple Grove","Maple Hill","Mapleton","Marietta","Marion","Marshall","Martin","Martinsville","Mason","Maxwell","Mayfield","Maywood","Meadowbrook","Mechanicsburg","Middletown","Midway","Milan","Milford","Millbrook","Milltown","Millwood","Milo","Mineral Springs","Monroe","Montague","Montezuma","Monticello","Montpelier","Montrose","Moore","Morgan","Morgantown","Morris","Morton","Mount Olive","Mount Pleasant","Mount Tabor","Mount Vernon","Mount Zion","Mountain View","Murphy","Murray","Nashville","Nebo","Needmore","New Boston","New Hope","New Salem","New Town","Newark","Newburg","Newport","Newton","Newtown","Nichols","Northwood","Norton","Norwood","Nottingham","Oak Grove","Oak Hill","Oak Ridge","Oakdale","Oakland","Oakley","Oakwood","Omega","Oneida","Orange","Owens","Page","Palmyra","Paradise","Parker","Parkwood","Patterson","Paxton","Payne","Peoria","Perkins","Perry","Peru","Philadelphia","Phillips","Phoenix","Pierce","Pine Grove","Pine Hill","Pine Ridge","Pine Valley","Pinecrest","Pineville","Piney Grove","Pinhook","Pioneer","Pisgah","Plainview","Plainville","Pleasant Grove","Pleasant Hill","Pleasant Valley","Point Pleasant","Pomona","Poplar Grove","Poplar Springs","Post Oak","Powell","Preston","Price","Proctor","Prospect","Prosperity","Providence","Pulaski","Pumpkin Center","Quincy","Randolph","Rankin","Raymond","Red Bank","Red Hill","Red Oak","Red Rock","Redland","Reno","Riceville","Richardson","Richfield","Richland","Richmond","Richville","Ridgeville","Ridgeway","Ridgewood","Riley","River Oaks","Riverdale","Riverside","Riverview","Roberts","Rochester","Rock Hill","Rock Springs","Rockford","Rockland","Rockwood","Rocky Point","Rolling Hills","Roscoe","Rose Hill","Rosebud","Roseville","Rosewood","Rossville","Roxbury","Roy","Royal","Ruby","Ruth","Rutland","Ryan","Saint Clair","Saint George","Saint James","Saint John","Saint Johns","Saint Paul","Salem","San Jose","Sand Hill","Sanford","Saratoga","Sardis","Sawyer","Scotland","Scottsville","Selma","Seneca","Shady Grove","Shamrock","Shannon","Sharon","Shaw","Shawnee","Sheffield","Shelby","Sheridan","Sherman","Sherwood Forest","Shiloh","Shore Acres","Sidney","Siloam","Silver City","Silver Creek","Silver Springs","Simpson","Slabtown","Sleepy Hollow","Smith","Smyrna","Snug Harbor","Somerset","Somerville","Spencer","Spring Grove","Spring Hill","Spring Lake","Spring Valley","Springdale","Springfield","Springhill","Springtown","Springville","Stafford","Star","State Line","Sterling","Stewart","Stony Point","Stratford","Stringtown","Sugar Hill","Sullivan","Sulphur Springs","Summerfield","Summerville","Summit","Sumner","Sunnyside","Sunrise","Sunset Beach","Sunshine","Superior","Sutton","Sycamore","Tabor","Taft","Tanglewood","Texas","The Meadows","Thomas","Thompson","Thompsonville","Three Forks","Tipton","Tracy","Tremont","Trenton","Trinity","Turner","Twin 
Oaks","Tyler","Tyrone","Union","Union City","Union Hill","Unionville","Unity","Utica","Valley View","Vance","Verona","Victoria","Vienna","Vista","Wakefield","Wallace","Walnut","Walnut Grove","Walton","Ward","Warwick","Washington Heights","Waterford","Waterloo","Waterville","Watkins","Wayland","Wayne","Webb","Welcome","Weldon","Wesley","West End","West Liberty","West Point","Westfield","Westgate","Westminster","Weston","Westport","Westville","Westwood","Wheatland","Whispering Pines","White City","White Hall","White Oak","White Plains","White Rock","Whitesville","Whitney","Wildwood","Willard","Williams","Williamsburg","Williamsville","Willis","Willow","Wilson","Wilton","Winchester","Winfield","Winona","Winslow","Wolf Creek","Woodbine","Woodbury","Woodcrest","Woodland","Woodland Hills","Woodland Park","Woodlawn","Woodrow","Woodruff","Woodside","Woodstock","Woodville","Wright","Wyoming","York","Yorktown","Youngstown"] + }, + 'DEPCNT': { + 'type': "integer", + 'range': [0,9] + }, + 'VEHCNT': { + 'type': "integer", + 'range': [-1,4] + }, + 'YEAR': { + 'type': "integer", + 'range': [1998,2000] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q69", + 'query': """ select + cd_gender, + cd_marital_status, + cd_education_status, + count(*) cnt1, + cd_purchase_estimate, + count(*) cnt2, + cd_credit_rating, + count(*) cnt3 + from + customer c,customer_address ca,customer_demographics + where + c.c_current_addr_sk = ca.ca_address_sk and + ca_state in ('{STATE1}','{STATE2}','{STATE3}') and + cd_demo_sk = c.c_current_cdemo_sk and + exists (select * + from store_sales,date_dim + where c.c_customer_sk = ss_customer_sk and + ss_sold_date_sk = d_date_sk and + d_year = {YEAR} and + d_moy between {MONTH} and {MONTH}+2) and + (not exists (select * + from web_sales,date_dim + where c.c_customer_sk = ws_bill_customer_sk and + ws_sold_date_sk = d_date_sk and + d_year = {YEAR} and + d_moy between {MONTH} and {MONTH}+2) and + not exists (select * + from catalog_sales,date_dim + where c.c_customer_sk = cs_ship_customer_sk and + cs_sold_date_sk = d_date_sk and + d_year = {YEAR} and + d_moy between {MONTH} and {MONTH}+2)) + group by cd_gender, + cd_marital_status, + cd_education_status, + cd_purchase_estimate, + cd_credit_rating + order by cd_gender, + cd_marital_status, + cd_education_status, + cd_purchase_estimate, + cd_credit_rating + limit 100""", + 'parameter': + { + 'MONTH': { + 'type': "integer", + 'range': [1,4] + }, + 'STATE': { + 'type': "list", + 'size': 3, + 'range': ["AK","AL","AR","AZ","CA","CO","CT","DC","DE","FL","GA","HI","IA","ID","IL","IN","KS","KY","LA","MA","MD","ME","MI","MN","MO","MS","MT","NC","ND","NE","NH","NJ","NM","NV","NY","OH","OK","OR","PA","RI","SC","SD","TN","TX","UT","VA","VT","WA","WI","WV","WY"] + }, + 'YEAR': { + 'type': "integer", + 'range': [1999,2004] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q70", + 'query': """ select + sum(ss_net_profit) as total_sum + ,s_state + ,s_county + ,grouping(s_state)+grouping(s_county) as lochierarchy + ,rank() over ( + partition by grouping(s_state)+grouping(s_county), + 
case when grouping(s_county) = 0 then s_state end + order by sum(ss_net_profit) is not null, sum(ss_net_profit) desc) as rank_within_parent + from + store_sales + ,date_dim d1 + ,store + where + d1.d_month_seq between {DMS} and {DMS}+11 + and d1.d_date_sk = ss_sold_date_sk + and s_store_sk = ss_store_sk + and s_state in + ( select s_state + from (select s_state as s_state, + rank() over ( partition by s_state order by sum(ss_net_profit) is not null, sum(ss_net_profit) desc) as ranking + from store_sales, store, date_dim + where d_month_seq between {DMS} and {DMS}+11 + and d_date_sk = ss_sold_date_sk + and s_store_sk = ss_store_sk + group by s_state + ) tmp1 + where ranking <= 5 + ) + group by s_state,s_county with rollup + order by + lochierarchy desc + ,case when lochierarchy = 0 then s_state end + ,rank_within_parent + limit 100""", + 'DBMS': { + 'MonetDB': """ -- Aggregation by s_state and s_county +SELECT + SUM(ss_net_profit) AS total_sum, + s_state, + s_county, + 0 AS lochierarchy, -- Indicates detailed level + RANK() OVER ( + PARTITION BY s_state + ORDER BY SUM(ss_net_profit) is not null, SUM(ss_net_profit) DESC + ) AS rank_within_parent +FROM + store_sales +JOIN date_dim AS d1 ON d1.d_date_sk = ss_sold_date_sk +JOIN store ON s_store_sk = ss_store_sk +WHERE + d1.d_month_seq BETWEEN {DMS} AND {DMS} + 11 + AND s_state IN ( + SELECT s_state + FROM ( + SELECT s_state, + RANK() OVER (PARTITION BY s_state ORDER BY SUM(ss_net_profit) is not null, SUM(ss_net_profit) DESC) AS ranking + FROM store_sales + JOIN store ON s_store_sk = ss_store_sk + JOIN date_dim ON d_date_sk = ss_sold_date_sk + WHERE d_month_seq BETWEEN {DMS} AND {DMS} + 11 + GROUP BY s_state + ) AS tmp1 + WHERE ranking <= 5 + ) +GROUP BY s_state, s_county + +UNION ALL + +-- Subtotal by s_state +SELECT + SUM(ss_net_profit) AS total_sum, + s_state, + NULL AS s_county, + 1 AS lochierarchy, -- Indicates subtotal level + RANK() OVER ( + PARTITION BY s_state + ORDER BY SUM(ss_net_profit) is not null, SUM(ss_net_profit) DESC + ) AS rank_within_parent +FROM + store_sales +JOIN date_dim AS d1 ON d1.d_date_sk = ss_sold_date_sk +JOIN store ON s_store_sk = ss_store_sk +WHERE + d1.d_month_seq BETWEEN {DMS} AND {DMS} + 11 + AND s_state IN ( + SELECT s_state + FROM ( + SELECT s_state, + RANK() OVER (PARTITION BY s_state ORDER BY SUM(ss_net_profit) is not null, SUM(ss_net_profit) DESC) AS ranking + FROM store_sales + JOIN store ON s_store_sk = ss_store_sk + JOIN date_dim ON d_date_sk = ss_sold_date_sk + WHERE d_month_seq BETWEEN {DMS} AND {DMS} + 11 + GROUP BY s_state + ) AS tmp1 + WHERE ranking <= 5 + ) +GROUP BY s_state + +UNION ALL + +-- Grand total +SELECT + SUM(ss_net_profit) AS total_sum, + NULL AS s_state, + NULL AS s_county, + 2 AS lochierarchy, -- Indicates grand total level + NULL AS rank_within_parent +FROM + store_sales +JOIN date_dim AS d1 ON d1.d_date_sk = ss_sold_date_sk +JOIN store ON s_store_sk = ss_store_sk +WHERE + d1.d_month_seq BETWEEN {DMS} AND {DMS} + 11 + AND s_state IN ( + SELECT s_state + FROM ( + SELECT s_state, + RANK() OVER (PARTITION BY s_state ORDER BY SUM(ss_net_profit) is not null, SUM(ss_net_profit) DESC) AS ranking + FROM store_sales + JOIN store ON s_store_sk = ss_store_sk + JOIN date_dim ON d_date_sk = ss_sold_date_sk + WHERE d_month_seq BETWEEN {DMS} AND {DMS} + 11 + GROUP BY s_state + ) AS tmp1 + WHERE ranking <= 5 + ) +ORDER BY + lochierarchy DESC, + s_state, + rank_within_parent +LIMIT 100""", + 'PostgreSQL': """ select + sum(ss_net_profit) as total_sum + ,s_state + ,s_county + 
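+                       -- PostgreSQL variant: grouping()/rollup() replace the generic WITH ROLLUP syntax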
,grouping(s_state)+grouping(s_county) as lochierarchy + ,rank() over ( + partition by grouping(s_state)+grouping(s_county), + case when grouping(s_county) = 0 then s_state end + order by sum(ss_net_profit) is not null, sum(ss_net_profit) desc) as rank_within_parent + from + store_sales + ,date_dim d1 + ,store + where + d1.d_month_seq between {DMS} and {DMS}+11 + and d1.d_date_sk = ss_sold_date_sk + and s_store_sk = ss_store_sk + and s_state in + ( select s_state + from (select s_state as s_state, + rank() over ( partition by s_state order by sum(ss_net_profit) is not null, sum(ss_net_profit) desc) as ranking + from store_sales, store, date_dim + where d_month_seq between {DMS} and {DMS}+11 + and d_date_sk = ss_sold_date_sk + and s_store_sk = ss_store_sk + group by s_state + ) tmp1 + where ranking <= 5 + ) + group by rollup(s_state,s_county) + order by + lochierarchy desc + ,case when grouping(s_state)+grouping(s_county) = 0 then s_state end + ,rank_within_parent + limit 100""", + 'Exasol': """ select + sum(ss_net_profit) as total_sum + ,s_state + ,s_county + ,grouping(s_state)+grouping(s_county) as lochierarchy + ,rank() over ( + partition by grouping(s_state)+grouping(s_county), + case when grouping(s_county) = 0 then s_state end + order by sum(ss_net_profit) is not null, sum(ss_net_profit) desc) as rank_within_parent + from + store_sales + ,date_dim d1 + ,store + where + d1.d_month_seq between {DMS} and {DMS}+11 + and d1.d_date_sk = ss_sold_date_sk + and s_store_sk = ss_store_sk + and s_state in + ( select s_state + from (select s_state as s_state, + rank() over ( partition by s_state order by sum(ss_net_profit) is not null, sum(ss_net_profit) desc) as ranking + from store_sales, store, date_dim + where d_month_seq between {DMS} and {DMS}+11 + and d_date_sk = ss_sold_date_sk + and s_store_sk = ss_store_sk + group by s_state + ) tmp1 + where ranking <= 5 + ) + group by rollup(s_state,s_county) + order by + lochierarchy desc + ,case when lochierarchy = 0 then s_state end + ,rank_within_parent + limit 100""", + 'MemSQL': """ select + sum(ss_net_profit) as total_sum + ,s_state + ,s_county + ,grouping(s_state)+grouping(s_county) as lochierarchy + ,rank() over ( + partition by grouping(s_state)+grouping(s_county), + case when grouping(s_county) = 0 then s_state end + order by sum(ss_net_profit) desc) as rank_within_parent + from + store_sales + ,date_dim d1 + ,store + where + d1.d_month_seq between {DMS} and {DMS}+11 + and d1.d_date_sk = ss_sold_date_sk + and s_store_sk = ss_store_sk + and s_state in + ( select s_state + from (select s_state as s_state, + rank() over ( partition by s_state order by sum(ss_net_profit) desc) as ranking + from store_sales, store, date_dim + where d_month_seq between {DMS} and {DMS}+11 + and d_date_sk = ss_sold_date_sk + and s_store_sk = ss_store_sk + group by s_state + ) tmp1 + where ranking <= 5 + ) + group by rollup(s_state,s_county) + order by + lochierarchy desc + ,case when lochierarchy = 0 then s_state end + ,rank_within_parent + limit 100""", + 'MariaDB': """SELECT * +FROM ( + SELECT + SUM(ss_net_profit) AS total_sum, + s_state, + s_county, + CASE WHEN s_state IS NULL THEN 1 ELSE 0 END + + CASE WHEN s_county IS NULL THEN 1 ELSE 0 END AS lochierarchy, + RANK() OVER ( + PARTITION BY + CASE WHEN s_state IS NULL THEN 1 ELSE 0 END + + CASE WHEN s_county IS NULL THEN 1 ELSE 0 END, + CASE WHEN s_county IS NOT NULL THEN s_state END + ORDER BY SUM(ss_net_profit) is not null, SUM(ss_net_profit) DESC + ) AS rank_within_parent + FROM + store_sales + JOIN
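+     -- MariaDB variant: lochierarchy is rebuilt from NULL checks on the rollup columns instead of grouping()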
date_dim AS d1 ON d1.d_date_sk = ss_sold_date_sk + JOIN store ON s_store_sk = ss_store_sk + WHERE + d1.d_month_seq BETWEEN {DMS} AND {DMS} + 11 + AND s_state IN ( + SELECT s_state + FROM ( + SELECT s_state, + RANK() OVER (PARTITION BY s_state ORDER BY SUM(ss_net_profit) is not null, SUM(ss_net_profit) DESC) AS ranking + FROM store_sales + JOIN store ON s_store_sk = ss_store_sk + JOIN date_dim ON d_date_sk = ss_sold_date_sk + WHERE d_month_seq BETWEEN {DMS} AND {DMS} + 11 + GROUP BY s_state + ) AS tmp1 + WHERE ranking <= 5 + ) + GROUP BY s_state, s_county WITH ROLLUP +) AS aggregated_data +ORDER BY + lochierarchy DESC, + CASE WHEN lochierarchy = 0 THEN s_state END, + rank_within_parent +LIMIT 100""" + }, + 'parameter': + { + 'DMS': { + 'type': "integer", + 'range': [1176,1224] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q71", + 'query': """select i_brand_id brand_id, i_brand brand,t_hour,t_minute, + sum(ext_price) ext_price + from item, (select ws_ext_sales_price as ext_price, + ws_sold_date_sk as sold_date_sk, + ws_item_sk as sold_item_sk, + ws_sold_time_sk as time_sk + from web_sales,date_dim + where d_date_sk = ws_sold_date_sk + and d_moy={MONTH} + and d_year={YEAR} + union all + select cs_ext_sales_price as ext_price, + cs_sold_date_sk as sold_date_sk, + cs_item_sk as sold_item_sk, + cs_sold_time_sk as time_sk + from catalog_sales,date_dim + where d_date_sk = cs_sold_date_sk + and d_moy={MONTH} + and d_year={YEAR} + union all + select ss_ext_sales_price as ext_price, + ss_sold_date_sk as sold_date_sk, + ss_item_sk as sold_item_sk, + ss_sold_time_sk as time_sk + from store_sales,date_dim + where d_date_sk = ss_sold_date_sk + and d_moy={MONTH} + and d_year={YEAR} + ) tmp,time_dim + where + sold_item_sk = i_item_sk + and i_manager_id=1 + and time_sk = t_time_sk + and (t_meal_time = 'breakfast' or t_meal_time = 'dinner') + group by i_brand, i_brand_id,t_hour,t_minute + order by ext_price desc, i_brand_id + """, + 'parameter': + { + 'MONTH': { + 'type': "integer", + 'range': [11,12] + }, + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q72", + 'query': """ select i_item_desc + ,w_warehouse_name + ,d1.d_week_seq + ,sum(case when p_promo_sk is null then 1 else 0 end) no_promo + ,sum(case when p_promo_sk is not null then 1 else 0 end) promo + ,count(*) total_cnt + from catalog_sales + join inventory on (cs_item_sk = inv_item_sk) + join warehouse on (w_warehouse_sk=inv_warehouse_sk) + join item on (i_item_sk = cs_item_sk) + join customer_demographics on (cs_bill_cdemo_sk = cd_demo_sk) + join household_demographics on (cs_bill_hdemo_sk = hd_demo_sk) + join date_dim d1 on (cs_sold_date_sk = d1.d_date_sk) + join date_dim d2 on (inv_date_sk = d2.d_date_sk) + join date_dim d3 on (cs_ship_date_sk = d3.d_date_sk) + left outer join promotion on (cs_promo_sk=p_promo_sk) + left outer join catalog_returns on (cr_item_sk = cs_item_sk and cr_order_number = cs_order_number) + where d1.d_week_seq = d2.d_week_seq + and inv_quantity_on_hand < cs_quantity + and d3.d_date > d1.d_date + 
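+                       -- the ship date (d3) must fall more than 5 days after the sale date (d1)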
interval '5' day + and hd_buy_potential = '{BP}' + and d1.d_year = {YEAR} + and cd_marital_status = '{MS}' + group by i_item_desc,w_warehouse_name,d1.d_week_seq + order by total_cnt desc, i_item_desc is not null, i_item_desc, w_warehouse_name is not null, w_warehouse_name, d_week_seq + limit 100""", + 'parameter': + { + 'BP': { + 'type': "list", + 'range': ["1001-5000",">10000","501-1000"] + }, + 'MS': { + 'type': "list", + 'range': ["M","S","D","W","U"] + }, + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q73", + 'query': """select c_last_name + ,c_first_name + ,c_salutation + ,c_preferred_cust_flag + ,ss_ticket_number + ,cnt from + (select ss_ticket_number + ,ss_customer_sk + ,count(*) cnt + from store_sales,date_dim,store,household_demographics + where store_sales.ss_sold_date_sk = date_dim.d_date_sk + and store_sales.ss_store_sk = store.s_store_sk + and store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk + and date_dim.d_dom between 1 and 2 + and (household_demographics.hd_buy_potential = '{BPONE}' or + household_demographics.hd_buy_potential = '{BPTWO}') + and household_demographics.hd_vehicle_count > 0 + and case when household_demographics.hd_vehicle_count > 0 then + household_demographics.hd_dep_count/ household_demographics.hd_vehicle_count else null end > 1 + and date_dim.d_year in ({YEAR},{YEAR}+1,{YEAR}+2) + and store.s_county in ('{COUNTY1}','{COUNTY2}','{COUNTY3}','{COUNTY4}') + group by ss_ticket_number,ss_customer_sk) dj,customer + where ss_customer_sk = c_customer_sk + and cnt between 1 and 5 + order by cnt desc, c_last_name asc""", + 'parameter': + { + 'BPONE': { + 'type': "list", + 'range': ["1001-5000",">10000","501-1000"] + }, + 'BPTWO': { + 'type': "list", + 'range': ["0-500","Unknown","5001-10000"] + }, + 'YEAR': { + 'type': "integer", + 'range': [1998,2000] + }, + 'COUNTY': { + 'type': "list", + 'size': 4, + 'range': ["Ziebach County","Zavala County","Zapata County","Yuma County","Yukon-Koyukuk Census Area","Yuba County","Young County","York County","Yolo County","Yoakum County","Yellowstone County","Yellow Medicine County","Yell County","Yazoo County","Yavapai County","Yates County","Yankton County","Yancey County","Yamhill County","Yalobusha County","Yakutat Borough","Yakima County","Yadkin County","Wythe County","Wyoming County","Wyandotte County","Wyandot County","Wright County","Worth County","Worcester County","Woodward County","Woodson County","Woods County","Woodruff County","Woodford County","Woodbury County","Wood County","Wolfe County","Wise County","Wirt County","Winston County","Winona County","Winneshiek County","Winnebago County","Winn Parish","Winkler County","Windsor County","Windham County","Winchester city","Wilson County","Williamson County","Williamsburg County","Williamsburg city","Williams County","Willacy County","Will County","Wilkinson County","Wilkin County","Wilkes County","Wilcox County","Wilbarger County","Wicomico County","Wichita County","Wibaux County","Whitman County","Whitley County","Whitfield County","Whiteside County","White Pine County","White County","Wheeler County","Wheatland County","Whatcom County","Wharton County","Wexford County","Wetzel County","Weston County","Westmoreland County","Westchester County","West Feliciana 
Parish","West Carroll Parish","West Baton Rouge Parish","Wells County","Weld County","Webster Parish","Webster County","Weber County","Webb County","Weakley County","Waynesboro city","Wayne County","Waushara County","Waupaca County","Waukesha County","Watonwan County","Watauga County","Washtenaw County","Washoe County","Washita County","Washington Parish","Washington County","Washburn County","Washakie County","Waseca County","Wasco County","Wasatch County","Warrick County","Warren County","Ware County","Ward County","Wapello County","Walworth County","Walton County","Walthall County","Walsh County","Wallowa County","Waller County","Wallace County","Walla Walla County","Walker County","Waldo County","Wakulla County","Wake County","Wahkiakum County","Wagoner County","Wadena County","Wade Hampton Census Area","Wabaunsee County","Wabasha County","Wabash County","Volusia County","Virginia Beach city","Vinton County","Vilas County","Vigo County","Victoria County","Vernon Parish","Vernon County","Vermillion County","Vermilion Parish","Vermilion County","Ventura County","Venango County","Vanderburgh County","Vance County","Van Zandt County","Van Wert County","Van Buren County","Valley County","Valencia County","Valdez-Cordova Census Area","Val Verde County","Uvalde County","Utah County","Upton County","Upson County","Upshur County","Union Parish","Union County","Unicoi County","Umatilla County","Ulster County","Uintah County","Uinta County","Tyrrell County","Tyler County","Twin Falls County","Twiggs County","Tuscola County","Tuscarawas County","Tuscaloosa County","Turner County","Tuolumne County","Tunica County","Tulsa County","Tulare County","Tucker County","Trumbull County","Trousdale County","Troup County","Tripp County","Trinity County","Trimble County","Trigg County","Treutlen County","Trempealeau County","Trego County","Treasure County","Travis County","Traverse County","Transylvania County","Traill County","Towns County","Towner County","Torrance County","Toombs County","Toole County","Tooele County","Tompkins County","Tom Green County","Tolland County","Todd County","Titus County","Tishomingo County","Tipton County","Tippecanoe County","Tippah County","Tioga County","Tillman County","Tillamook County","Tift County","Thurston County","Throckmorton County","Thomas County","Thayer County","Texas County","Teton County","Terry County","Terrell County","Terrebonne Parish","Tensas Parish","Teller County","Telfair County","Tehama County","Tazewell County","Taylor County","Tattnall County","Tate County","Tarrant County","Taos County","Tangipahoa Parish","Taney County","Tama County","Tallapoosa County","Tallahatchie County","Talladega County","Taliaferro County","Talbot County","Switzerland County","Swisher County","Swift County","Sweetwater County","Sweet Grass County","Swain County","Suwannee County","Sutton County","Sutter County","Sussex County","Susquehanna County","Surry County","Sunflower County","Sumter County","Sumner County","Summit County","Summers County","Sully County","Sullivan County","Suffolk County","Suffolk city","Sublette County","Stutsman County","Strafford County","Story County","Storey County","Stonewall County","Stone County","Stokes County","Stoddard County","Stillwater County","Stewart County","Stevens County","Steuben County","Sterling County","Stephenson County","Stephens County","Steele County","Stearns County","Staunton city","Starr County","Starke County","Stark County","Stanton County","Stanly County","Stanley County","Stanislaus County","Stafford 
County","Spotsylvania County","Spokane County","Spink County","Spencer County","Spartanburg County","Spalding County","Southampton County","Sonoma County","Somervell County","Somerset County","Solano County","Socorro County","Snyder County","Snohomish County","Smyth County","Smith County","Slope County","Skamania County","Skagit County","Sitka Borough","Siskiyou County","Sioux County","Simpson County","Silver Bow County","Sierra County","Sibley County","Shoshone County","Shiawassee County","Sherman County","Sheridan County","Sherburne County","Shenandoah County","Shelby County","Sheboygan County","Shawnee County","Shawano County","Shasta County","Sharp County","Sharkey County","Shannon County","Shackelford County","Seward County","Sevier County","Sequoyah County","Sequatchie County","Seneca County","Seminole County","Sedgwick County","Sebastian County","Searcy County","Scurry County","Screven County","Scotts Bluff County","Scott County","Scotland County","Scioto County","Schuylkill County","Schuyler County","Schoolcraft County","Schoharie County","Schley County","Schleicher County","Schenectady County","Sawyer County","Saunders County","Sauk County","Sarpy County","Sargent County","Saratoga County","Sarasota County","Santa Rosa County","Santa Fe County","Santa Cruz County","Santa Clara County","Santa Barbara County","Sanpete County","Sanilac County","Sangamon County","Sandusky County","Sandoval County","Sanders County","Sanborn County","San Saba County","San Patricio County","San Miguel County","San Mateo County","San Luis Obispo County","San Juan County","San Joaquin County","San Jacinto County","San Francisco County","San Diego County","San Bernardino County","San Benito County","San Augustine County","Sampson County","Saluda County","Salt Lake County","Saline County","Salem County","Salem city","Saguache County","Saginaw County","Sagadahoc County","Sacramento County","Sac County","Sabine Parish","Sabine County","Rutland County","Rutherford County","Russell County","Rusk County","Rush County","Runnels County","Rowan County","Routt County","Ross County","Rosebud County","Roseau County","Roscommon County","Roosevelt County","Rooks County","Rolette County","Rogers County","Roger Mills County","Rockwall County","Rockland County","Rockingham County","Rockdale County","Rockcastle County","Rockbridge County","Rock Island County","Rock County","Robeson County","Robertson County","Roberts County","Roanoke County","Roanoke city","Roane County","Riverside County","Ritchie County","Ripley County","Rio Grande County","Rio Blanco County","Rio Arriba County","Ringgold County","Riley County","Richmond County","Richmond city","Richland Parish","Richland County","Richardson County","Rich County","Rice County","Rhea County","Reynolds County","Republic County","Renville County","Rensselaer County","Reno County","Refugio County","Reeves County","Redwood County","Red Willow County","Red River Parish","Red River County","Red Lake County","Real County","Reagan County","Ray County","Rawlins County","Ravalli County","Rappahannock County","Rapides Parish","Ransom County","Rankin County","Randolph County","Randall County","Ramsey County","Ralls County","Raleigh County","Rains County","Radford city","Racine County","Rabun County","Quitman County","Queens County","Queen Anne County","Quay County","Putnam County","Pushmataha County","Pulaski County","Pueblo County","Prowers County","Providence County","Prince William County","Prince George County","Prince Edward County","Price County","Preston County","Presque Isle 
County","Presidio County","Prentiss County","Preble County","Pratt County","Prairie County","Powhatan County","Poweshiek County","Power County","Powell County","Powder River County","Potter County","Pottawattamie County","Pottawatomie County","Posey County","Portsmouth city","Porter County","Portage County","Poquoson city","Pope County","Pontotoc County","Pondera County","Polk County","Pointe Coupee Parish","Poinsett County","Pocahontas County","Plymouth County","Plumas County","Pleasants County","Platte County","Plaquemines Parish","Placer County","Piute County","Pittsylvania County","Pittsburg County","Pitt County","Pitkin County","Piscataquis County","Pipestone County","Pinellas County","Pine County","Pinal County","Pima County","Pike County","Pierce County","Pickett County","Pickens County","Pickaway County","Piatt County","Phillips County","Philadelphia County","Phelps County","Pettis County","Petroleum County","Petersburg city","Person County","Pershing County","Perry County","Perquimans County","Perkins County","Pepin County","Peoria County","Penobscot County","Pennington County","Pendleton County","Pender County","Pend Oreille County","Pemiscot County","Pembina County","Pecos County","Pearl River County","Peach County","Payne County","Payette County","Pawnee County","Paulding County","Patrick County","Passaic County","Pasquotank County","Pasco County","Parmer County","Parker County","Parke County","Park County","Panola County","Pamlico County","Palo Pinto County","Palo Alto County","Palm Beach County","Page County","Pacific County","Ozaukee County","Ozark County","Oxford County","Owyhee County","Owsley County","Owen County","Overton County","Outagamie County","Ouray County","Ouachita Parish","Ouachita County","Otter Tail County","Ottawa County","Otsego County","Otoe County","Otero County","Oswego County","Oscoda County","Osceola County","Osborne County","Osage County","Orleans Parish","Orleans County","Oregon County","Orangeburg County","Orange County","Ontonagon County","Ontario County","Onslow County","Onondaga County","Oneida County","Olmsted County","Oliver County","Oldham County","Oktibbeha County","Okmulgee County","Oklahoma County","Okfuskee County","Okeechobee County","Okanogan County","Okaloosa County","Ohio County","Oglethorpe County","Ogle County","Ogemaw County","Oconto County","Oconee County","Ochiltree County","Oceana County","Ocean County","Obion County","Oakland County","O-Brien County","Nye County","Nueces County","Nuckolls County","Noxubee County","Nowata County","Nottoway County","Norton County","Norton city","Northwest Arctic Borough","Northumberland County","Northampton County","North Slope Borough","Norman County","Norfolk County","Norfolk city","Nome Census Area","Nolan County","Nodaway County","Nobles County","Noble County","Niobrara County","Nicollet County","Nicholas County","Niagara County","Nez Perce County","Newton County","Newport News city","Newport County","Newberry County","Newaygo County","New York County","New Madrid County","New London County","New Kent County","New Haven County","New Hanover County","New Castle County","Nevada County","Ness County","Neshoba County","Neosho County","Nemaha County","Nelson County","Navarro County","Navajo County","Natrona County","Natchitoches Parish","Nassau County","Nash County","Napa County","Nantucket County","Nance County","Nacogdoches County","Musselshell County","Muskogee County","Muskingum County","Muskegon County","Muscogee County","Muscatine County","Murray County","Multnomah County","Muhlenberg 
County","Mower County","Mountrail County","Moultrie County","Motley County","Morton County","Morrow County","Morrison County","Morris County","Morrill County","Morgan County","Morehouse Parish","Mora County","Moore County","Moody County","Montrose County","Montour County","Montmorency County","Montgomery County","Montezuma County","Monterey County","Montcalm County","Montague County","Monroe County","Monongalia County","Monona County","Mono County","Monmouth County","Moniteau County","Mohave County","Moffat County","Modoc County","Mobile County","Mitchell County","Missoula County","Mississippi County","Missaukee County","Minnehaha County","Minidoka County","Mingo County","Mineral County","Miner County","Milwaukee County","Mills County","Miller County","Mille Lacs County","Millard County","Milam County","Mifflin County","Midland County","Middlesex County","Miami County","Metcalfe County","Mesa County","Merrimack County","Merrick County","Meriwether County","Mercer County","Merced County","Menominee County","Menifee County","Mendocino County","Menard County","Mellette County","Meigs County","Meeker County","Medina County","Mecosta County","Mecklenburg County","Meagher County","Meade County","McPherson County","McNairy County","McMullen County","McMinn County","McLeod County","McLennan County","McLean County","McKinley County","McKenzie County","McKean County","McIntosh County","McHenry County","McDuffie County","McDowell County","McDonough County","McDonald County","McCurtain County","McCulloch County","McCreary County","McCracken County","McCormick County","McCook County","McCone County","McClain County","Mayes County","Maverick County","Maury County","Maui County","Mathews County","Matanuska-Susitna Borough","Matagorda County","Massac County","Mason County","Martinsville city","Martin County","Marshall County","Marquette County","Marlboro County","Mariposa County","Marion County","Marinette County","Marin County","Maries County","Maricopa County","Marengo County","Marathon County","Manitowoc County","Manistee County","Manatee County","Manassas Park city","Manassas city","Malheur County","Major County","Mahoning County","Mahnomen County","Mahaska County","Magoffin County","Madison Parish","Madison County","Madera County","Macoupin County","Macon County","Macomb County","Mackinac County","Lyon County","Lynn County","Lynchburg city","Lyman County","Lycoming County","Luzerne County","Lunenburg County","Luna County","Lumpkin County","Luce County","Lucas County","Lubbock County","Lowndes County","Loving County","Love County","Loup County","Louisa County","Loudoun County","Loudon County","Los Angeles County","Los Alamos County","Lorain County","Lonoke County","Long County","Logan County","Llano County","Livingston Parish","Livingston County","Live Oak County","Little River County","Litchfield County","Lipscomb County","Linn County","Lincoln Parish","Lincoln County","Limestone County","Licking County","Liberty County","Lexington County","Lexington city","Lewis County","Lewis and Clark County","Levy County","Letcher County","Leslie County","Leon County","Lenoir County","Lenawee County","Lemhi County","Lehigh County","Leflore County","Leelanau County","Lee County","Lebanon County","Leavenworth County","Leake County","Lea County","Le Sueur County","Le Flore County","Lawrence County","Lavaca County","Laurens County","Laurel County","Lauderdale County","Latimer County","Latah County","Lassen County","Las Animas County","Larue County","Larimer County","Laramie County","Lapeer County","Lanier 
County","Langlade County","Lane County","Lander County","Lancaster County","Lampasas County","LaMoure County","Lamoille County","Lamb County","Lamar County","Lake of the Woods County","Lake County","Lake and Peninsula Borough","Lagrange County","Lafourche Parish","Lafayette Parish","Lafayette County","Laclede County","Lackawanna County","Lac qui Parle County","Labette County","La Salle Parish","La Salle County","La Porte County","La Plata County","La Paz County","La Crosse County","Kossuth County","Kosciusko County","Kootenai County","Koochiching County","Kodiak Island Borough","Knox County","Knott County","Klickitat County","Kleberg County","Klamath County","Kittson County","Kittitas County","Kitsap County","Kit Carson County","Kiowa County","Kinney County","Kingsbury County","Kings County","Kingman County","Kingfisher County","King William County","King George County","King County","King and Queen County","Kimble County","Kimball County","Kidder County","Keya Paha County","Keweenaw County","Kewaunee County","Ketchikan Gateway Borough","Kershaw County","Kerr County","Kern County","Keokuk County","Kenton County","Kent County","Kenosha County","Kennebec County","Kenedy County","Kendall County","Kenai Peninsula Borough","Kemper County","Keith County","Kearny County","Kearney County","Kay County","Kaufman County","Kauai County","Karnes County","Kankakee County","Kane County","Kandiyohi County","Kanawha County","Kanabec County","Kalkaska County","Kalamazoo County","Juniata County","Juneau County","Juneau Borough","Judith Basin County","Juab County","Josephine County","Jones County","Johnston County","Johnson County","Jo Daviess County","Jim Wells County","Jim Hogg County","Jewell County","Jessamine County","Jersey County","Jerome County","Jerauld County","Jennings County","Jenkins County","Jefferson Parish","Jefferson Davis Parish","Jefferson Davis County","Jefferson County","Jeff Davis County","Jay County","Jasper County","James City County","Jackson Parish","Jackson County","Jack County","Izard County","Itawamba County","Itasca County","Issaquena County","Isle of Wight County","Island County","Isanti County","Isabella County","Irwin County","Iroquois County","Iron County","Irion County","Iredell County","Iowa County","Iosco County","Ionia County","Inyo County","Ingham County","Indiana County","Indian River County","Independence County","Imperial County","Idaho County","Ida County","Iberville Parish","Iberia Parish","Hyde County","Hutchinson County","Huron County","Huntington County","Huntingdon County","Hunterdon County","Hunt County","Humphreys County","Humboldt County","Hughes County","Huerfano County","Hudspeth County","Hudson County","Hubbard County","Howell County","Howard County","Houston County","Houghton County","Hot Springs County","Hot Spring County","Horry County","Hopkins County","Hopewell city","Hooker County","Hood River County","Hood County","Honolulu County","Holt County","Holmes County","Hoke County","Hodgeman County","Hockley County","Hocking County","Hitchcock County","Hinsdale County","Hinds County","Hillsdale County","Hillsborough County","Hill County","Highlands County","Highland County","Hidalgo County","Hickory County","Hickman County","Hettinger County","Hertford County","Hernando County","Herkimer County","Henry County","Henrico County","Hennepin County","Hendry County","Hendricks County","Henderson County","Hempstead County","Hemphill County","Heard County","Haywood County","Hays County","Hayes County","Hawkins County","Hawaii County","Haskell County","Harvey 
County","Hartley County","Hartford County","Hart County","Harrisonburg city","Harrison County","Harris County","Harper County","Harney County","Harnett County","Harmon County","Harlan County","Harford County","Hardy County","Harding County","Hardin County","Hardeman County","Hardee County","Haralson County","Hanson County","Hansford County","Hanover County","Hand County","Hancock County","Hampton County","Hampton city","Hampshire County","Hampden County","Hamlin County","Hamilton County","Hamblen County","Hall County","Halifax County","Hale County","Haines Borough","Habersham County","Haakon County","Gwinnett County","Guthrie County","Gunnison County","Gulf County","Guilford County","Guernsey County","Guadalupe County","Grundy County","Grimes County","Griggs County","Grenada County","Gregory County","Gregg County","Greer County","Greenwood County","Greenville County","Greenup County","Greensville County","Greenlee County","Greene County","Greenbrier County","Green Lake County","Green County","Greeley County","Grayson County","Grays Harbor County","Gray County","Graves County","Gratiot County","Granville County","Grant Parish","Grant County","Granite County","Grand Traverse County","Grand Isle County","Grand Forks County","Grand County","Grainger County","Graham County","Grafton County","Grady County","Gove County","Gosper County","Goshen County","Gordon County","Gooding County","Goodhue County","Goochland County","Gonzales County","Goliad County","Golden Valley County","Gogebic County","Glynn County","Gloucester County","Glenn County","Glasscock County","Glascock County","Gladwin County","Glades County","Glacier County","Gilpin County","Gilmer County","Gilliam County","Gillespie County","Giles County","Gilchrist County","Gila County","Gibson County","Georgetown County","George County","Gentry County","Geneva County","Genesee County","Gem County","Geauga County","Geary County","Gates County","Gaston County","Gasconade County","Garza County","Garvin County","Garrett County","Garrard County","Garland County","Garfield County","Garden County","Galveston County","Gallia County","Gallatin County","Galax city","Gaines County","Gage County","Gadsden County","Furnas County","Fulton County","Frontier County","Frio County","Fresno County","Fremont County","Freestone County","Freeborn County","Fredericksburg city","Frederick County","Franklin Parish","Franklin County","Franklin city","Fountain County","Foster County","Fort Bend County","Forsyth County","Forrest County","Forest County","Ford County","Fond du Lac County","Foard County","Fluvanna County","Floyd County","Florence County","Fleming County","Flathead County","Flagler County","Fisher County","Finney County","Fillmore County","Ferry County","Fergus County","Fentress County","Fayette County","Fauquier County","Faulkner County","Faulk County","Faribault County","Fannin County","Falls County","Falls Church city","Fallon County","Fall River County","Fairfield County","Fairfax County","Fairfax city","Fairbanks North Star Borough","Evans County","Evangeline Parish","Eureka County","Etowah County","Estill County","Essex County","Esmeralda County","Escambia County","Erie County","Erath County","Emporia city","Emmons County","Emmet County","Emery County","Emanuel County","Elmore County","Ellsworth County","Ellis County","Elliott County","Elko County","Elkhart County","Elk County","Elbert County","El Paso County","El Dorado County","Effingham County","Edwards County","Edmunds County","Edmonson County","Edgefield County","Edgecombe County","Edgar 
County","Eddy County","Ector County","Echols County","Eau Claire County","Eaton County","Eastland County","East Feliciana Parish","East Carroll Parish","East Baton Rouge Parish","Early County","Eagle County","Dyer County","Duval County","Dutchess County","Durham County","Duplin County","DuPage County","Dunn County","Dunklin County","Dundy County","Dukes County","Duchesne County","Dubuque County","Dubois County","Drew County","Douglas County","Dougherty County","Dorchester County","Door County","Dooly County","Donley County","Doniphan County","Dona Ana County","Dolores County","Dodge County","Doddridge County","Dixon County","Dixie County","Divide County","District of Columbia","Dinwiddie County","Dimmit County","Dillon County","Dillingham Census Area","Dickson County","Dickinson County","Dickey County","Dickenson County","Dickens County","DeWitt County","Dewey County","Deuel County","DeSoto County","Desha County","Deschutes County","Des Moines County","Denver County","Denton County","Dent County","Denali Borough","Delta County","Delaware County","Del Norte County","DeKalb County","Defiance County","Deer Lodge County","Decatur County","DeBaca County","Dearborn County","Deaf Smith County","De Witt County","De Soto Parish","De Kalb County","Day County","Dawson County","Dawes County","Davison County","Davis County","Daviess County","Davie County","Davidson County","Dauphin County","Darlington County","Darke County","Dare County","Danville city","Daniels County","Dane County","Dallas County","Dallam County","Dale County","Dakota County","Daggett County","Dade County","Cuyahoga County","Custer County","Curry County","Currituck County","Cuming County","Cumberland County","Culpeper County","Cullman County","Culberson County","Crowley County","Crow Wing County","Cross County","Crosby County","Crook County","Crockett County","Crittenden County","Crisp County","Crenshaw County","Creek County","Crawford County","Craven County","Crane County","Craighead County","Craig County","Cowlitz County","Cowley County","Coweta County","Covington County","Covington city","Cottonwood County","Cotton County","Cottle County","Costilla County","Coshocton County","Coryell County","Cortland County","Corson County","Copiah County","Coosa County","Coos County","Cooper County","Cooke County","Cook County","Conway County","Converse County","Contra Costa County","Conejos County","Conecuh County","Concordia Parish","Concho County","Comanche County","Comal County","Colusa County","Columbus County","Columbiana County","Columbia County","Colquitt County","Colorado County","Colonial Heights city","Collingsworth County","Collin County","Collier County","Colleton County","Colfax County","Coles County","Coleman County","Cole County","Colbert County","Coke County","Coffey County","Coffee County","Codington County","Coconino County","Cocke County","Cochran County","Cochise County","Cobb County","Coal County","Coahoma County","Cloud County","Clinton County","Clinch County","Clifton Forge city","Cleveland County","Clermont County","Cleburne County","Clearwater County","Clearfield County","Clear Creek County","Clayton County","Clay County","Clatsop County","Clarke County","Clark County","Clarion County","Clarendon County","Clare County","Clallam County","Claiborne Parish","Claiborne County","Clackamas County","Citrus County","Cimarron County","Cibola County","Churchill County","Christian County","Chowan County","Chouteau County","Choctaw County","Chittenden County","Chisago County","Chippewa County","Chilton County","Childress 
County","Chicot County","Chickasaw County","Cheyenne County","Chesterfield County","Chester County","Cheshire County","Chesapeake city","Cherry County","Cherokee County","Chenango County","Chemung County","Chelan County","Cheboygan County","Cheatham County","Chaves County","Chautauqua County","Chattooga County","Chattahoochee County","Chatham County","Chase County","Charlton County","Charlottesville city","Charlotte County","Charlevoix County","Charleston County","Charles Mix County","Charles County","Charles City County","Chariton County","Champaign County","Chambers County","Chaffee County","Cerro Gordo County","Centre County","Cedar County","Cecil County","Cayuga County","Cavalier County","Cattaraugus County","Catron County","Catoosa County","Catawba County","Catahoula Parish","Caswell County","Castro County","Cassia County","Cass County","Casey County","Cascade County","Carver County","Carteret County","Carter County","Carson County","Carson City","Carroll County","Caroline County","Carlton County","Carlisle County","Caribou County","Carbon County","Cape May County","Cape Girardeau County","Canyon County","Cannon County","Candler County","Canadian County","Campbell County","Camp County","Cameron Parish","Cameron County","Camden County","Cambria County","Camas County","Calvert County","Calumet County","Calloway County","Callaway County","Callahan County","Calhoun County","Caledonia County","Caldwell Parish","Caldwell County","Calcasieu Parish","Calaveras County","Caddo Parish","Caddo County","Cache County","Cabell County","Cabarrus County","Butts County","Butte County","Butler County","Burt County","Burnett County","Burnet County","Burlington County","Burleson County","Burleigh County","Burke County","Bureau County","Buncombe County","Bullock County","Bulloch County","Bullitt County","Buffalo County","Buena Vista County","Buena Vista city","Bucks County","Buckingham County","Buchanan County","Bryan County","Brunswick County","Brule County","Brown County","Broward County","Broome County","Brooks County","Brookings County","Brooke County","Bronx County","Broadwater County","Bristol County","Bristol city","Bristol Bay Borough","Briscoe County","Brewster County","Brevard County","Bremer County","Breckinridge County","Breathitt County","Brazos County","Brazoria County","Braxton County","Brantley County","Branch County","Bradley County","Bradford County","Bracken County","Boyle County","Boyd County","Box Elder County","Box Butte County","Bowman County","Bowie County","Bourbon County","Boundary County","Boulder County","Bottineau County","Botetourt County","Bossier Parish","Bosque County","Borden County","Boone County","Bonneville County","Bonner County","Bond County","Bon Homme County","Bollinger County","Bolivar County","Boise County","Blue Earth County","Blount County","Bledsoe County","Bleckley County","Bland County","Blanco County","Blair County","Blaine County","Bladen County","Blackford County","Black Hawk County","Bingham County","Billings County","Big Stone County","Big Horn County","Bienville Parish","Bibb County","Bexar County","Bethel Census Area","Bertie County","Berrien County","Bernalillo County","Berkshire County","Berks County","Berkeley County","Bergen County","Benzie County","Benton County","Bent County","Benson County","Bennington County","Bennett County","Benewah County","Ben Hill County","Beltrami County","Belmont County","Bell County","Belknap County","Bee County","Bedford County","Bedford city","Beckham County","Becker County","Beaverhead County","Beaver 
County","Beauregard Parish","Beaufort County","Bear Lake County","Beadle County","Baylor County","Bayfield County","Bay County","Baxter County","Bath County","Bates County","Bastrop County","Bartow County","Barton County","Bartholomew County","Barry County","Barrow County","Barron County","Barren County","Barnwell County","Barnstable County","Barnes County","Barbour County","Barber County","Baraga County","Bannock County","Banner County","Banks County","Bandera County","Bamberg County","Baltimore County","Baltimore city","Ballard County","Baldwin County","Baker County","Bailey County","Bacon County","Baca County","Avoyelles Parish","Avery County","Autauga County","Austin County","Aurora County","Augusta County","Auglaize County","Audubon County","Audrain County","Attala County","Atoka County","Atlantic County","Atkinson County","Athens County","Atchison County","Atascosa County","Assumption Parish","Asotin County","Ashtabula County","Ashley County","Ashland County","Ashe County","Ascension Parish","Arthur County","Aroostook County","Armstrong County","Arlington County","Arkansas County","Arenac County","Archuleta County","Archer County","Arapahoe County","Aransas County","Appomattox County","Appling County","Appanoose County","Apache County","Antrim County","Antelope County","Anson County","Anoka County","Anne Arundel County","Angelina County","Androscoggin County","Andrews County","Andrew County","Anderson County","Anchorage Borough","Amite County","Amherst County","Amelia County","Amador County","Alpine County","Alpena County","Allendale County","Allen Parish","Allen County","Allegheny County","Alleghany County","Allegany County","Allegan County","Allamakee County","Alger County","Alfalfa County","Alexandria city","Alexander County","Aleutians West Census Area","Aleutians East Borough","Alcorn County","Alcona County","Albemarle County","Albany County","Alamosa County","Alameda County","Alamance County","Alachua County","Aitkin County","Aiken County","Addison County","Adams County","Adair County","Ada County","Accomack County","Acadia Parish","Abbeville County"] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q74", + 'query': """with year_total as ( + select c_customer_id customer_id + ,c_first_name customer_first_name + ,c_last_name customer_last_name + ,d_year as years + ,{AGGONE}(ss_net_paid) year_total + ,'s' sale_type + from customer + ,store_sales + ,date_dim + where c_customer_sk = ss_customer_sk + and ss_sold_date_sk = d_date_sk + and d_year in ({YEAR},{YEAR}+1) + group by c_customer_id + ,c_first_name + ,c_last_name + ,d_year + union all + select c_customer_id customer_id + ,c_first_name customer_first_name + ,c_last_name customer_last_name + ,d_year as years + ,{AGGONE}(ws_net_paid) year_total + ,'w' sale_type + from customer + ,web_sales + ,date_dim + where c_customer_sk = ws_bill_customer_sk + and ws_sold_date_sk = d_date_sk + and d_year in ({YEAR},{YEAR}+1) + group by c_customer_id + ,c_first_name + ,c_last_name + ,d_year + ) + select + t_s_secyear.customer_id, t_s_secyear.customer_first_name, t_s_secyear.customer_last_name + from year_total t_s_firstyear + ,year_total t_s_secyear + ,year_total t_w_firstyear + ,year_total t_w_secyear + where t_s_secyear.customer_id = t_s_firstyear.customer_id + and t_s_firstyear.customer_id = 
t_w_secyear.customer_id + and t_s_firstyear.customer_id = t_w_firstyear.customer_id + and t_s_firstyear.sale_type = 's' + and t_w_firstyear.sale_type = 'w' + and t_s_secyear.sale_type = 's' + and t_w_secyear.sale_type = 'w' + and t_s_firstyear.years = {YEAR} + and t_s_secyear.years = {YEAR}+1 + and t_w_firstyear.years = {YEAR} + and t_w_secyear.years = {YEAR}+1 + and t_s_firstyear.year_total > 0 + and t_w_firstyear.year_total > 0 + and case when t_w_firstyear.year_total > 0 then t_w_secyear.year_total / t_w_firstyear.year_total else null end + > case when t_s_firstyear.year_total > 0 then t_s_secyear.year_total / t_s_firstyear.year_total else null end + order by {ORDERC1},{ORDERC2},{ORDERC3} + limit 100""", + 'parameter': + { + 'AGGONE': { + 'type': "list", + 'range': ["sum","min","max","avg","stddev_samp"] + }, + 'ORDERC': { + 'type': "integer", + 'size': 3, + 'range': [1,3] + }, + 'YEAR': { + 'type': "integer", + 'range': [1998,2001] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q75", + 'query': """WITH all_sales AS ( + SELECT d_year + ,i_brand_id + ,i_class_id + ,i_category_id + ,i_manufact_id + ,SUM(sales_cnt) AS sales_cnt + ,SUM(sales_amt) AS sales_amt + FROM (SELECT d_year + ,i_brand_id + ,i_class_id + ,i_category_id + ,i_manufact_id + ,cs_quantity - COALESCE(cr_return_quantity,0) AS sales_cnt + ,cs_ext_sales_price - COALESCE(cr_return_amount,0.0) AS sales_amt + FROM catalog_sales JOIN item ON i_item_sk=cs_item_sk + JOIN date_dim ON d_date_sk=cs_sold_date_sk + LEFT JOIN catalog_returns ON (cs_order_number=cr_order_number + AND cs_item_sk=cr_item_sk) + WHERE i_category='{CATEGORY}' + UNION + SELECT d_year + ,i_brand_id + ,i_class_id + ,i_category_id + ,i_manufact_id + ,ss_quantity - COALESCE(sr_return_quantity,0) AS sales_cnt + ,ss_ext_sales_price - COALESCE(sr_return_amt,0.0) AS sales_amt + FROM store_sales JOIN item ON i_item_sk=ss_item_sk + JOIN date_dim ON d_date_sk=ss_sold_date_sk + LEFT JOIN store_returns ON (ss_ticket_number=sr_ticket_number + AND ss_item_sk=sr_item_sk) + WHERE i_category='{CATEGORY}' + UNION + SELECT d_year + ,i_brand_id + ,i_class_id + ,i_category_id + ,i_manufact_id + ,ws_quantity - COALESCE(wr_return_quantity,0) AS sales_cnt + ,ws_ext_sales_price - COALESCE(wr_return_amt,0.0) AS sales_amt + FROM web_sales JOIN item ON i_item_sk=ws_item_sk + JOIN date_dim ON d_date_sk=ws_sold_date_sk + LEFT JOIN web_returns ON (ws_order_number=wr_order_number + AND ws_item_sk=wr_item_sk) + WHERE i_category='{CATEGORY}') sales_detail + GROUP BY d_year, i_brand_id, i_class_id, i_category_id, i_manufact_id) + SELECT prev_yr.d_year AS prev_year + ,curr_yr.d_year AS years + ,curr_yr.i_brand_id + ,curr_yr.i_class_id + ,curr_yr.i_category_id + ,curr_yr.i_manufact_id + ,prev_yr.sales_cnt AS prev_yr_cnt + ,curr_yr.sales_cnt AS curr_yr_cnt + ,curr_yr.sales_cnt-prev_yr.sales_cnt AS sales_cnt_diff + ,curr_yr.sales_amt-prev_yr.sales_amt AS sales_amt_diff + FROM all_sales curr_yr, all_sales prev_yr + WHERE curr_yr.i_brand_id=prev_yr.i_brand_id + AND curr_yr.i_class_id=prev_yr.i_class_id + AND curr_yr.i_category_id=prev_yr.i_category_id + AND curr_yr.i_manufact_id=prev_yr.i_manufact_id + AND curr_yr.d_year={YEAR} + AND prev_yr.d_year={YEAR}-1 + AND CAST(curr_yr.sales_cnt AS DECIMAL(17,2))/CAST(prev_yr.sales_cnt AS DECIMAL(17,2))<0.9 + ORDER BY 
sales_cnt_diff,sales_amt_diff + limit 100""", + 'parameter': + { + 'CATEGORY': { + 'type': "list", + 'range': ["Books","Children","Electronics","Home","Jewelry","Men","Music","Shoes","Sports","Women"] + }, + 'YEAR': { + 'type': "integer", + 'range': [1999,2002] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q76", + 'query': """ select channel, col_name, d_year, d_qoy, i_category, COUNT(*) sales_cnt, SUM(ext_sales_price) sales_amt FROM ( + SELECT 'store' as channel, '{NULLCOLSS}' col_name, d_year, d_qoy, i_category, ss_ext_sales_price ext_sales_price + FROM store_sales, item, date_dim + WHERE {NULLCOLSS} IS NULL + AND ss_sold_date_sk=d_date_sk + AND ss_item_sk=i_item_sk + UNION ALL + SELECT 'web' as channel, '{NULLCOLWS}' col_name, d_year, d_qoy, i_category, ws_ext_sales_price ext_sales_price + FROM web_sales, item, date_dim + WHERE {NULLCOLWS} IS NULL + AND ws_sold_date_sk=d_date_sk + AND ws_item_sk=i_item_sk + UNION ALL + SELECT 'catalog' as channel, '{NULLCOLCS}' col_name, d_year, d_qoy, i_category, cs_ext_sales_price ext_sales_price + FROM catalog_sales, item, date_dim + WHERE {NULLCOLCS} IS NULL + AND cs_sold_date_sk=d_date_sk + AND cs_item_sk=i_item_sk) foo + GROUP BY channel, col_name, d_year, d_qoy, i_category + ORDER BY channel, col_name, d_year, d_qoy, i_category + limit 100""", + 'parameter': + { + 'NULLCOLCS': { + 'type': "list", + 'range': ["cs_bill_customer_sk","cs_bill_hdemo_sk","cs_bill_addr_sk","cs_ship_customer_sk","cs_ship_cdemo_sk","cs_ship_hdemo_sk","cs_ship_addr_sk","cs_ship_mode_sk","cs_warehouse_sk","cs_promo_sk"] + }, + 'NULLCOLSS': { + 'type': "list", + 'range': ["ss_customer_sk","ss_cdemo_sk","ss_hdemo_sk","ss_addr_sk","ss_store_sk","ss_promo_sk"] + }, + 'NULLCOLWS': { + 'type': "list", + 'range': ["ws_bill_customer_sk","ws_bill_hdemo_sk","ws_bill_addr_sk","ws_ship_customer_sk","ws_ship_cdemo_sk","ws_ship_hdemo_sk","ws_ship_addr_sk","ws_web_page_sk","ws_web_site_sk","ws_ship_mode_sk","ws_warehouse_sk","ws_promo_sk"] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q77", + 'query': """with ss as + (select s_store_sk, + sum(ss_ext_sales_price) as sales, + sum(ss_net_profit) as profit + from store_sales, + date_dim, + store + where ss_sold_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + and ss_store_sk = s_store_sk + group by s_store_sk) + , + sr as + (select s_store_sk, + sum(sr_return_amt) as returns_angepasst, + sum(sr_net_loss) as profit_loss + from store_returns, + date_dim, + store + where sr_returned_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + and sr_store_sk = s_store_sk + group by s_store_sk), + cs as + (select cs_call_center_sk, + sum(cs_ext_sales_price) as sales, + sum(cs_net_profit) as profit + from catalog_sales, + date_dim + where cs_sold_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + group by cs_call_center_sk + ), + cr 
as + (select cr_call_center_sk, + sum(cr_return_amount) as returns_angepasst, + sum(cr_net_loss) as profit_loss + from catalog_returns, + date_dim + where cr_returned_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + group by cr_call_center_sk + ), + ws as + ( select wp_web_page_sk, + sum(ws_ext_sales_price) as sales, + sum(ws_net_profit) as profit + from web_sales, + date_dim, + web_page + where ws_sold_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + and ws_web_page_sk = wp_web_page_sk + group by wp_web_page_sk), + wr as + (select wp_web_page_sk, + sum(wr_return_amt) as returns_angepasst, + sum(wr_net_loss) as profit_loss + from web_returns, + date_dim, + web_page + where wr_returned_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + and wr_web_page_sk = wp_web_page_sk + group by wp_web_page_sk) + select channel + , id + , sum(sales) as sales + , sum(returns_angepasst) as returns_angepasst + , sum(profit) as profit + from + (select 'store channel' as channel + , ss.s_store_sk as id + , sales + , coalesce(returns_angepasst, 0) as returns_angepasst + , (profit - coalesce(profit_loss,0)) as profit + from ss left join sr + on ss.s_store_sk = sr.s_store_sk + union all + select 'catalog channel' as channel + , cs_call_center_sk as id + , sales + , returns_angepasst + , (profit - profit_loss) as profit + from cs + , cr + union all + select 'web channel' as channel + , ws.wp_web_page_sk as id + , sales + , coalesce(returns_angepasst, 0) returns_angepasst + , (profit - coalesce(profit_loss,0)) as profit + from ws left join wr + on ws.wp_web_page_sk = wr.wp_web_page_sk + ) x + group by channel, id with rollup + order by channel + ,id + limit 100""", + 'DBMS': { + 'MariaDB': """with ss as + (select s_store_sk, + sum(ss_ext_sales_price) as sales, + sum(ss_net_profit) as profit + from store_sales, + date_dim, + store + where ss_sold_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + and ss_store_sk = s_store_sk + group by s_store_sk) + , + sr as + (select s_store_sk, + sum(sr_return_amt) as returns_angepasst, + sum(sr_net_loss) as profit_loss + from store_returns, + date_dim, + store + where sr_returned_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + and sr_store_sk = s_store_sk + group by s_store_sk), + cs as + (select cs_call_center_sk, + sum(cs_ext_sales_price) as sales, + sum(cs_net_profit) as profit + from catalog_sales, + date_dim + where cs_sold_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + group by cs_call_center_sk + ), + cr as + (select cr_call_center_sk, + sum(cr_return_amount) as returns_angepasst, + sum(cr_net_loss) as profit_loss + from catalog_returns, + date_dim + where cr_returned_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + group by cr_call_center_sk + ), + ws as + ( select wp_web_page_sk, + sum(ws_ext_sales_price) as sales, + sum(ws_net_profit) as profit + from web_sales, + date_dim, + web_page + where ws_sold_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and 
(cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + and ws_web_page_sk = wp_web_page_sk + group by wp_web_page_sk), + wr as + (select wp_web_page_sk, + sum(wr_return_amt) as returns_angepasst, + sum(wr_net_loss) as profit_loss + from web_returns, + date_dim, + web_page + where wr_returned_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + and wr_web_page_sk = wp_web_page_sk + group by wp_web_page_sk) + select channel + , id + , sum(sales) as sales + , sum(returns_angepasst) as returns_angepasst + , sum(profit) as profit + from + (select 'store channel' as channel + , ss.s_store_sk as id + , sales + , coalesce(returns_angepasst, 0) as returns_angepasst + , (profit - coalesce(profit_loss,0)) as profit + from ss left join sr + on ss.s_store_sk = sr.s_store_sk + union all + select 'catalog channel' as channel + , cs_call_center_sk as id + , sales + , returns_angepasst + , (profit - profit_loss) as profit + from cs + , cr + union all + select 'web channel' as channel + , ws.wp_web_page_sk as id + , sales + , coalesce(returns_angepasst, 0) returns_angepasst + , (profit - coalesce(profit_loss,0)) as profit + from ws left join wr + on ws.wp_web_page_sk = wr.wp_web_page_sk + ) x + group by channel, id with rollup + limit 100""", + 'MonetDB': """with ss as + (select s_store_sk, + sum(ss_ext_sales_price) as sales, + sum(ss_net_profit) as profit + from store_sales, + date_dim, + store + where ss_sold_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + and ss_store_sk = s_store_sk + group by s_store_sk) + , + sr as + (select s_store_sk, + sum(sr_return_amt) as returns_angepasst, + sum(sr_net_loss) as profit_loss + from store_returns, + date_dim, + store + where sr_returned_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + and sr_store_sk = s_store_sk + group by s_store_sk), + cs as + (select cs_call_center_sk, + sum(cs_ext_sales_price) as sales, + sum(cs_net_profit) as profit + from catalog_sales, + date_dim + where cs_sold_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + group by cs_call_center_sk + ), + cr as + (select cr_call_center_sk, + sum(cr_return_amount) as returns_angepasst, + sum(cr_net_loss) as profit_loss + from catalog_returns, + date_dim + where cr_returned_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + group by cr_call_center_sk + ), + ws as + ( select wp_web_page_sk, + sum(ws_ext_sales_price) as sales, + sum(ws_net_profit) as profit + from web_sales, + date_dim, + web_page + where ws_sold_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + and ws_web_page_sk = wp_web_page_sk + group by wp_web_page_sk), + wr as + (select wp_web_page_sk, + sum(wr_return_amt) as returns_angepasst, + sum(wr_net_loss) as profit_loss + from web_returns, + date_dim, + web_page + where wr_returned_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + and wr_web_page_sk = wp_web_page_sk + group by wp_web_page_sk) + select channel + , id + , sum(sales) as sales + , sum(returns_angepasst) as returns_angepasst + , sum(profit) as 
profit + from + (select 'store channel' as channel + , ss.s_store_sk as id + , sales + , coalesce(returns_angepasst, 0) as returns_angepasst + , (profit - coalesce(profit_loss,0)) as profit + from ss left join sr + on ss.s_store_sk = sr.s_store_sk + union all + select 'catalog channel' as channel + , cs_call_center_sk as id + , sales + , returns_angepasst + , (profit - profit_loss) as profit + from cs + , cr + union all + select 'web channel' as channel + , ws.wp_web_page_sk as id + , sales + , coalesce(returns_angepasst, 0) returns_angepasst + , (profit - coalesce(profit_loss,0)) as profit + from ws left join wr + on ws.wp_web_page_sk = wr.wp_web_page_sk + ) x + group by rollup(channel, id) + order by channel + ,id + limit 100""", + 'PostgreSQL': """with ss as + (select s_store_sk, + sum(ss_ext_sales_price) as sales, + sum(ss_net_profit) as profit + from store_sales, + date_dim, + store + where ss_sold_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + and ss_store_sk = s_store_sk + group by s_store_sk) + , + sr as + (select s_store_sk, + sum(sr_return_amt) as returns_angepasst, + sum(sr_net_loss) as profit_loss + from store_returns, + date_dim, + store + where sr_returned_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + and sr_store_sk = s_store_sk + group by s_store_sk), + cs as + (select cs_call_center_sk, + sum(cs_ext_sales_price) as sales, + sum(cs_net_profit) as profit + from catalog_sales, + date_dim + where cs_sold_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + group by cs_call_center_sk + ), + cr as + (select cr_call_center_sk, + sum(cr_return_amount) as returns_angepasst, + sum(cr_net_loss) as profit_loss + from catalog_returns, + date_dim + where cr_returned_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + group by cr_call_center_sk + ), + ws as + ( select wp_web_page_sk, + sum(ws_ext_sales_price) as sales, + sum(ws_net_profit) as profit + from web_sales, + date_dim, + web_page + where ws_sold_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + and ws_web_page_sk = wp_web_page_sk + group by wp_web_page_sk), + wr as + (select wp_web_page_sk, + sum(wr_return_amt) as returns_angepasst, + sum(wr_net_loss) as profit_loss + from web_returns, + date_dim, + web_page + where wr_returned_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + and wr_web_page_sk = wp_web_page_sk + group by wp_web_page_sk) + select channel + , id + , sum(sales) as sales + , sum(returns_angepasst) as returns_angepasst + , sum(profit) as profit + from + (select 'store channel' as channel + , ss.s_store_sk as id + , sales + , coalesce(returns_angepasst, 0) as returns_angepasst + , (profit - coalesce(profit_loss,0)) as profit + from ss left join sr + on ss.s_store_sk = sr.s_store_sk + union all + select 'catalog channel' as channel + , cs_call_center_sk as id + , sales + , returns_angepasst + , (profit - profit_loss) as profit + from cs + , cr + union all + select 'web channel' as channel + , ws.wp_web_page_sk as id + , sales + , coalesce(returns_angepasst, 0) returns_angepasst + , (profit - 
coalesce(profit_loss,0)) as profit + from ws left join wr + on ws.wp_web_page_sk = wr.wp_web_page_sk + ) x + group by rollup(channel, id) + order by channel + ,id + limit 100""", + 'Exasol': """with ss as + (select s_store_sk, + sum(ss_ext_sales_price) as sales, + sum(ss_net_profit) as profit + from store_sales, + date_dim, + store + where ss_sold_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + and ss_store_sk = s_store_sk + group by s_store_sk) + , + sr as + (select s_store_sk, + sum(sr_return_amt) as returns_angepasst, + sum(sr_net_loss) as profit_loss + from store_returns, + date_dim, + store + where sr_returned_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + and sr_store_sk = s_store_sk + group by s_store_sk), + cs as + (select cs_call_center_sk, + sum(cs_ext_sales_price) as sales, + sum(cs_net_profit) as profit + from catalog_sales, + date_dim + where cs_sold_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + group by cs_call_center_sk + ), + cr as + (select cr_call_center_sk, + sum(cr_return_amount) as returns_angepasst, + sum(cr_net_loss) as profit_loss + from catalog_returns, + date_dim + where cr_returned_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + group by cr_call_center_sk + ), + ws as + ( select wp_web_page_sk, + sum(ws_ext_sales_price) as sales, + sum(ws_net_profit) as profit + from web_sales, + date_dim, + web_page + where ws_sold_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + and ws_web_page_sk = wp_web_page_sk + group by wp_web_page_sk), + wr as + (select wp_web_page_sk, + sum(wr_return_amt) as returns_angepasst, + sum(wr_net_loss) as profit_loss + from web_returns, + date_dim, + web_page + where wr_returned_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + and wr_web_page_sk = wp_web_page_sk + group by wp_web_page_sk) + select channel + , id + , sum(sales) as sales + , sum(returns_angepasst) as returns_angepasst + , sum(profit) as profit + from + (select 'store channel' as channel + , ss.s_store_sk as id + , sales + , coalesce(returns_angepasst, 0) as returns_angepasst + , (profit - coalesce(profit_loss,0)) as profit + from ss left join sr + on ss.s_store_sk = sr.s_store_sk + union all + select 'catalog channel' as channel + , cs_call_center_sk as id + , sales + , returns_angepasst + , (profit - profit_loss) as profit + from cs + , cr + union all + select 'web channel' as channel + , ws.wp_web_page_sk as id + , sales + , coalesce(returns_angepasst, 0) returns_angepasst + , (profit - coalesce(profit_loss,0)) as profit + from ws left join wr + on ws.wp_web_page_sk = wr.wp_web_page_sk + ) x + group by rollup(channel, id) + order by channel + ,id + limit 100""", + 'MemSQL': """with ss as + (select s_store_sk, + sum(ss_ext_sales_price) as sales, + sum(ss_net_profit) as profit + from store_sales, + date_dim, + store + where ss_sold_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + and ss_store_sk = s_store_sk + group by s_store_sk) + , + sr as + (select s_store_sk, + 
sum(sr_return_amt) as returns_angepasst, + sum(sr_net_loss) as profit_loss + from store_returns, + date_dim, + store + where sr_returned_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + and sr_store_sk = s_store_sk + group by s_store_sk), + cs as + (select cs_call_center_sk, + sum(cs_ext_sales_price) as sales, + sum(cs_net_profit) as profit + from catalog_sales, + date_dim + where cs_sold_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + group by cs_call_center_sk + ), + cr as + (select cr_call_center_sk, + sum(cr_return_amount) as returns_angepasst, + sum(cr_net_loss) as profit_loss + from catalog_returns, + date_dim + where cr_returned_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + group by cr_call_center_sk + ), + ws as + ( select wp_web_page_sk, + sum(ws_ext_sales_price) as sales, + sum(ws_net_profit) as profit + from web_sales, + date_dim, + web_page + where ws_sold_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + and ws_web_page_sk = wp_web_page_sk + group by wp_web_page_sk), + wr as + (select wp_web_page_sk, + sum(wr_return_amt) as returns_angepasst, + sum(wr_net_loss) as profit_loss + from web_returns, + date_dim, + web_page + where wr_returned_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + and wr_web_page_sk = wp_web_page_sk + group by wp_web_page_sk) + select channel + , id + , sum(sales) as sales + , sum(returns_angepasst) as returns_angepasst + , sum(profit) as profit + from + (select 'store channel' as channel + , ss.s_store_sk as id + , sales + , coalesce(returns_angepasst, 0) as returns_angepasst + , (profit - coalesce(profit_loss,0)) as profit + from ss left join sr + on ss.s_store_sk = sr.s_store_sk + union all + select 'catalog channel' as channel + , cs_call_center_sk as id + , sales + , returns_angepasst + , (profit - profit_loss) as profit + from cs + , cr + union all + select 'web channel' as channel + , ws.wp_web_page_sk as id + , sales + , coalesce(returns_angepasst, 0) returns_angepasst + , (profit - coalesce(profit_loss,0)) as profit + from ws left join wr + on ws.wp_web_page_sk = wr.wp_web_page_sk + ) x + group by rollup(channel, id) + order by channel + ,id + limit 100""", + }, + 'parameter': + { + 'DAY': { + 'type': "integer", + 'range': [1,30] + }, + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q78", + 'query': """with ws as + (select d_year AS ws_sold_year, ws_item_sk, + ws_bill_customer_sk ws_customer_sk, + sum(ws_quantity) ws_qty, + sum(ws_wholesale_cost) ws_wc, + sum(ws_sales_price) ws_sp + from web_sales + left join web_returns on wr_order_number=ws_order_number and ws_item_sk=wr_item_sk + join date_dim on ws_sold_date_sk = d_date_sk + where wr_order_number is null + group by d_year, ws_item_sk, ws_bill_customer_sk + ), + cs as + (select d_year AS cs_sold_year, cs_item_sk, + cs_bill_customer_sk cs_customer_sk, + sum(cs_quantity) cs_qty, + 
sum(cs_wholesale_cost) cs_wc, + sum(cs_sales_price) cs_sp + from catalog_sales + left join catalog_returns on cr_order_number=cs_order_number and cs_item_sk=cr_item_sk + join date_dim on cs_sold_date_sk = d_date_sk + where cr_order_number is null + group by d_year, cs_item_sk, cs_bill_customer_sk + ), + ss as + (select d_year AS ss_sold_year, ss_item_sk, + ss_customer_sk, + sum(ss_quantity) ss_qty, + sum(ss_wholesale_cost) ss_wc, + sum(ss_sales_price) ss_sp + from store_sales + left join store_returns on sr_ticket_number=ss_ticket_number and ss_item_sk=sr_item_sk + join date_dim on ss_sold_date_sk = d_date_sk + where sr_ticket_number is null + group by d_year, ss_item_sk, ss_customer_sk + ) + select + {SELECTONE}, + round(100.*ss_qty/(coalesce(ws_qty,0)+coalesce(cs_qty,0)),2)/100. ratio, + ss_qty store_qty, ss_wc store_wholesale_cost, ss_sp store_sales_price, + coalesce(ws_qty,0)+coalesce(cs_qty,0) other_chan_qty, + coalesce(ws_wc,0)+coalesce(cs_wc,0) other_chan_wholesale_cost, + coalesce(ws_sp,0)+coalesce(cs_sp,0) other_chan_sales_price + from ss + left join ws on (ws_sold_year=ss_sold_year and ws_item_sk=ss_item_sk and ws_customer_sk=ss_customer_sk) + left join cs on (cs_sold_year=ss_sold_year and cs_item_sk=ss_item_sk and cs_customer_sk=ss_customer_sk) + where (coalesce(ws_qty,0)>0 or coalesce(cs_qty, 0)>0) and ss_sold_year={YEAR} + order by + {SELECTONE}, + ss_qty desc, ss_wc desc, ss_sp desc, + other_chan_qty, + other_chan_wholesale_cost, + other_chan_sales_price, + ratio + limit 100""", + 'parameter': + { + 'SELECTONE': { + 'type': "list", + 'range': ["ss_sold_year","ss_item_sk","ss_customer_sk","ss_sold_year, ss_item_sk, ss_customer_sk"] + }, + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q79", + 'query': """ select + c_last_name,c_first_name,substr(s_city,1,30) city,ss_ticket_number,amt,profit + from + (select ss_ticket_number + ,ss_customer_sk + ,store.s_city + ,sum(ss_coupon_amt) amt + ,sum(ss_net_profit) profit + from store_sales,date_dim,store,household_demographics + where store_sales.ss_sold_date_sk = date_dim.d_date_sk + and store_sales.ss_store_sk = store.s_store_sk + and store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk + and (household_demographics.hd_dep_count = {DEPCNT} or household_demographics.hd_vehicle_count > {VEHCNT}) + and date_dim.d_dow = 1 + and date_dim.d_year in ({YEAR},{YEAR}+1,{YEAR}+2) + and store.s_number_employees between 200 and 295 + group by ss_ticket_number,ss_customer_sk,ss_addr_sk,store.s_city) ms,customer + where ss_customer_sk = c_customer_sk + order by c_last_name is not null, c_last_name, c_first_name is not null, c_first_name, substr(s_city,1,30) is not null, substr(s_city,1,30), profit is not null, profit + limit 100""", + 'parameter': + { + 'DEPCNT': { + 'type': "integer", + 'range': [0,9] + }, + 'VEHCNT': { + 'type': "integer", + 'range': [-1,4] + }, + 'YEAR': { + 'type': "integer", + 'range': [1998,2000] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q80", + 'query': """with ssr as + (select s_store_id 
as store_id, + sum(ss_ext_sales_price) as sales, + sum(coalesce(sr_return_amt, 0)) as returns_amt, + sum(ss_net_profit - coalesce(sr_net_loss, 0)) as profit + from store_sales left outer join store_returns on + (ss_item_sk = sr_item_sk and ss_ticket_number = sr_ticket_number), + date_dim, + store, + item, + promotion + where ss_sold_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + and ss_store_sk = s_store_sk + and ss_item_sk = i_item_sk + and i_current_price > 50 + and ss_promo_sk = p_promo_sk + and p_channel_tv = 'N' + group by s_store_id) + , + csr as + (select cp_catalog_page_id as catalog_page_id, + sum(cs_ext_sales_price) as sales, + sum(coalesce(cr_return_amount, 0)) as returns_amt, + sum(cs_net_profit - coalesce(cr_net_loss, 0)) as profit + from catalog_sales left outer join catalog_returns on + (cs_item_sk = cr_item_sk and cs_order_number = cr_order_number), + date_dim, + catalog_page, + item, + promotion + where cs_sold_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + and cs_catalog_page_sk = cp_catalog_page_sk + and cs_item_sk = i_item_sk + and i_current_price > 50 + and cs_promo_sk = p_promo_sk + and p_channel_tv = 'N' + group by cp_catalog_page_id) + , + wsr as + (select web_site_id, + sum(ws_ext_sales_price) as sales, + sum(coalesce(wr_return_amt, 0)) as returns_amt, + sum(ws_net_profit - coalesce(wr_net_loss, 0)) as profit + from web_sales left outer join web_returns on + (ws_item_sk = wr_item_sk and ws_order_number = wr_order_number), + date_dim, + web_site, + item, + promotion + where ws_sold_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + and ws_web_site_sk = web_site_sk + and ws_item_sk = i_item_sk + and i_current_price > 50 + and ws_promo_sk = p_promo_sk + and p_channel_tv = 'N' + group by web_site_id) + select channel + , id + , sum(sales) as sales + , sum(returns_amt) as returns_amt + , sum(profit) as profit + from + (select 'store channel' as channel + , 'store' || store_id as id + , sales + , returns_amt + , profit + from ssr + union all + select 'catalog channel' as channel + , 'catalog_page' || catalog_page_id as id + , sales + , returns_amt + , profit + from csr + union all + select 'web channel' as channel + , 'web_site' || web_site_id as id + , sales + , returns_amt + , profit + from wsr + ) x + group by channel, id with rollup + order by channel is not null, channel + ,id is not null, id + limit 100""", + 'DBMS': { + 'MySQL': """with total as (with ssr as + (select s_store_id as store_id, + sum(ss_ext_sales_price) as sales, + sum(coalesce(sr_return_amt, 0)) as returns_amt, + sum(ss_net_profit - coalesce(sr_net_loss, 0)) as profit + from store_sales left outer join store_returns on + (ss_item_sk = sr_item_sk and ss_ticket_number = sr_ticket_number), + date_dim, + store, + item, + promotion + where ss_sold_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + and ss_store_sk = s_store_sk + and ss_item_sk = i_item_sk + and i_current_price > 50 + and ss_promo_sk = p_promo_sk + and p_channel_tv = 'N' + group by s_store_id) + , + csr as + (select cp_catalog_page_id as catalog_page_id, + sum(cs_ext_sales_price) as sales, + sum(coalesce(cr_return_amount, 0)) as returns_amt, + sum(cs_net_profit - coalesce(cr_net_loss, 0)) as profit + 
from catalog_sales left outer join catalog_returns on + (cs_item_sk = cr_item_sk and cs_order_number = cr_order_number), + date_dim, + catalog_page, + item, + promotion + where cs_sold_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + and cs_catalog_page_sk = cp_catalog_page_sk + and cs_item_sk = i_item_sk + and i_current_price > 50 + and cs_promo_sk = p_promo_sk + and p_channel_tv = 'N' + group by cp_catalog_page_id) + , + wsr as + (select web_site_id, + sum(ws_ext_sales_price) as sales, + sum(coalesce(wr_return_amt, 0)) as returns_amt, + sum(ws_net_profit - coalesce(wr_net_loss, 0)) as profit + from web_sales left outer join web_returns on + (ws_item_sk = wr_item_sk and ws_order_number = wr_order_number), + date_dim, + web_site, + item, + promotion + where ws_sold_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + and ws_web_site_sk = web_site_sk + and ws_item_sk = i_item_sk + and i_current_price > 50 + and ws_promo_sk = p_promo_sk + and p_channel_tv = 'N' + group by web_site_id) + select channel + , id + , sum(sales) as sales + , sum(returns_amt) as returns_amt + , sum(profit) as profit + from + (select 'store channel' as channel + , 'store' || store_id as id + , sales + , returns_amt + , profit + from ssr + union all + select 'catalog channel' as channel + , 'catalog_page' || catalog_page_id as id + , sales + , returns_amt + , profit + from csr + union all + select 'web channel' as channel + , 'web_site' || web_site_id as id + , sales + , returns_amt + , profit + from wsr + ) x + group by channel, id with rollup) + select * from total + order by channel is not null, channel + ,id is not null, id + limit 100""", + 'MonetDB': """with ssr as + (select s_store_id as store_id, + sum(ss_ext_sales_price) as sales, + sum(coalesce(sr_return_amt, 0)) as returns_amt, + sum(ss_net_profit - coalesce(sr_net_loss, 0)) as profit + from store_sales left outer join store_returns on + (ss_item_sk = sr_item_sk and ss_ticket_number = sr_ticket_number), + date_dim, + store, + item, + promotion + where ss_sold_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + and ss_store_sk = s_store_sk + and ss_item_sk = i_item_sk + and i_current_price > 50 + and ss_promo_sk = p_promo_sk + and p_channel_tv = 'N' + group by s_store_id) + , + csr as + (select cp_catalog_page_id as catalog_page_id, + sum(cs_ext_sales_price) as sales, + sum(coalesce(cr_return_amount, 0)) as returns_amt, + sum(cs_net_profit - coalesce(cr_net_loss, 0)) as profit + from catalog_sales left outer join catalog_returns on + (cs_item_sk = cr_item_sk and cs_order_number = cr_order_number), + date_dim, + catalog_page, + item, + promotion + where cs_sold_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + and cs_catalog_page_sk = cp_catalog_page_sk + and cs_item_sk = i_item_sk + and i_current_price > 50 + and cs_promo_sk = p_promo_sk + and p_channel_tv = 'N' + group by cp_catalog_page_id) + , + wsr as + (select web_site_id, + sum(ws_ext_sales_price) as sales, + sum(coalesce(wr_return_amt, 0)) as returns_amt, + sum(ws_net_profit - coalesce(wr_net_loss, 0)) as profit + from web_sales left outer join web_returns on + (ws_item_sk = wr_item_sk and ws_order_number = wr_order_number), + date_dim, + web_site, + item, + promotion + 
where ws_sold_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + and ws_web_site_sk = web_site_sk + and ws_item_sk = i_item_sk + and i_current_price > 50 + and ws_promo_sk = p_promo_sk + and p_channel_tv = 'N' + group by web_site_id) + select channel + , id + , sum(sales) as sales + , sum(returns_amt) as returns_amt + , sum(profit) as profit + from + (select 'store channel' as channel + , 'store' || store_id as id + , sales + , returns_amt + , profit + from ssr + union all + select 'catalog channel' as channel + , 'catalog_page' || catalog_page_id as id + , sales + , returns_amt + , profit + from csr + union all + select 'web channel' as channel + , 'web_site' || web_site_id as id + , sales + , returns_amt + , profit + from wsr + ) x + group by rollup(channel, id) + order by channel is not null, channel + ,id is not null, id + limit 100""", + 'PostgreSQL': """with ssr as + (select s_store_id as store_id, + sum(ss_ext_sales_price) as sales, + sum(coalesce(sr_return_amt, 0)) as returns_amt, + sum(ss_net_profit - coalesce(sr_net_loss, 0)) as profit + from store_sales left outer join store_returns on + (ss_item_sk = sr_item_sk and ss_ticket_number = sr_ticket_number), + date_dim, + store, + item, + promotion + where ss_sold_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + and ss_store_sk = s_store_sk + and ss_item_sk = i_item_sk + and i_current_price > 50 + and ss_promo_sk = p_promo_sk + and p_channel_tv = 'N' + group by s_store_id) + , + csr as + (select cp_catalog_page_id as catalog_page_id, + sum(cs_ext_sales_price) as sales, + sum(coalesce(cr_return_amount, 0)) as returns_amt, + sum(cs_net_profit - coalesce(cr_net_loss, 0)) as profit + from catalog_sales left outer join catalog_returns on + (cs_item_sk = cr_item_sk and cs_order_number = cr_order_number), + date_dim, + catalog_page, + item, + promotion + where cs_sold_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + and cs_catalog_page_sk = cp_catalog_page_sk + and cs_item_sk = i_item_sk + and i_current_price > 50 + and cs_promo_sk = p_promo_sk + and p_channel_tv = 'N' + group by cp_catalog_page_id) + , + wsr as + (select web_site_id, + sum(ws_ext_sales_price) as sales, + sum(coalesce(wr_return_amt, 0)) as returns_amt, + sum(ws_net_profit - coalesce(wr_net_loss, 0)) as profit + from web_sales left outer join web_returns on + (ws_item_sk = wr_item_sk and ws_order_number = wr_order_number), + date_dim, + web_site, + item, + promotion + where ws_sold_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + and ws_web_site_sk = web_site_sk + and ws_item_sk = i_item_sk + and i_current_price > 50 + and ws_promo_sk = p_promo_sk + and p_channel_tv = 'N' + group by web_site_id) + select channel + , id + , sum(sales) as sales + , sum(returns_amt) as returns_amt + , sum(profit) as profit + from + (select 'store channel' as channel + , 'store' || store_id as id + , sales + , returns_amt + , profit + from ssr + union all + select 'catalog channel' as channel + , 'catalog_page' || catalog_page_id as id + , sales + , returns_amt + , profit + from csr + union all + select 'web channel' as channel + , 'web_site' || web_site_id as id + , sales + , returns_amt + , profit + from wsr + ) x + group by rollup(channel, id) + order by 
channel is not null, channel + ,id is not null, id + limit 100""", + 'Exasol': """with ssr as + (select s_store_id as store_id, + sum(ss_ext_sales_price) as sales, + sum(coalesce(sr_return_amt, 0)) as returns_amt, + sum(ss_net_profit - coalesce(sr_net_loss, 0)) as profit + from store_sales left outer join store_returns on + (ss_item_sk = sr_item_sk and ss_ticket_number = sr_ticket_number), + date_dim, + store, + item, + promotion + where ss_sold_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + and ss_store_sk = s_store_sk + and ss_item_sk = i_item_sk + and i_current_price > 50 + and ss_promo_sk = p_promo_sk + and p_channel_tv = 'N' + group by s_store_id) + , + csr as + (select cp_catalog_page_id as catalog_page_id, + sum(cs_ext_sales_price) as sales, + sum(coalesce(cr_return_amount, 0)) as returns_amt, + sum(cs_net_profit - coalesce(cr_net_loss, 0)) as profit + from catalog_sales left outer join catalog_returns on + (cs_item_sk = cr_item_sk and cs_order_number = cr_order_number), + date_dim, + catalog_page, + item, + promotion + where cs_sold_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + and cs_catalog_page_sk = cp_catalog_page_sk + and cs_item_sk = i_item_sk + and i_current_price > 50 + and cs_promo_sk = p_promo_sk + and p_channel_tv = 'N' + group by cp_catalog_page_id) + , + wsr as + (select web_site_id, + sum(ws_ext_sales_price) as sales, + sum(coalesce(wr_return_amt, 0)) as returns_amt, + sum(ws_net_profit - coalesce(wr_net_loss, 0)) as profit + from web_sales left outer join web_returns on + (ws_item_sk = wr_item_sk and ws_order_number = wr_order_number), + date_dim, + web_site, + item, + promotion + where ws_sold_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + and ws_web_site_sk = web_site_sk + and ws_item_sk = i_item_sk + and i_current_price > 50 + and ws_promo_sk = p_promo_sk + and p_channel_tv = 'N' + group by web_site_id) + select channel + , id + , sum(sales) as sales + , sum(returns_amt) as returns_amt + , sum(profit) as profit + from + (select 'store channel' as channel + , 'store' || store_id as id + , sales + , returns_amt + , profit + from ssr + union all + select 'catalog channel' as channel + , 'catalog_page' || catalog_page_id as id + , sales + , returns_amt + , profit + from csr + union all + select 'web channel' as channel + , 'web_site' || web_site_id as id + , sales + , returns_amt + , profit + from wsr + ) x + group by rollup(channel, id) + order by channel + ,id + limit 100""", + 'MemSQL': """with ssr as + (select s_store_id as store_id, + sum(ss_ext_sales_price) as sales, + sum(coalesce(sr_return_amt, 0)) as returns_amt, + sum(ss_net_profit - coalesce(sr_net_loss, 0)) as profit + from store_sales left outer join store_returns on + (ss_item_sk = sr_item_sk and ss_ticket_number = sr_ticket_number), + date_dim, + store, + item, + promotion + where ss_sold_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + and ss_store_sk = s_store_sk + and ss_item_sk = i_item_sk + and i_current_price > 50 + and ss_promo_sk = p_promo_sk + and p_channel_tv = 'N' + group by s_store_id) + , + csr as + (select cp_catalog_page_id as catalog_page_id, + sum(cs_ext_sales_price) as sales, + sum(coalesce(cr_return_amount, 0)) as returns_amt, + 
sum(cs_net_profit - coalesce(cr_net_loss, 0)) as profit + from catalog_sales left outer join catalog_returns on + (cs_item_sk = cr_item_sk and cs_order_number = cr_order_number), + date_dim, + catalog_page, + item, + promotion + where cs_sold_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + and cs_catalog_page_sk = cp_catalog_page_sk + and cs_item_sk = i_item_sk + and i_current_price > 50 + and cs_promo_sk = p_promo_sk + and p_channel_tv = 'N' + group by cp_catalog_page_id) + , + wsr as + (select web_site_id, + sum(ws_ext_sales_price) as sales, + sum(coalesce(wr_return_amt, 0)) as returns_amt, + sum(ws_net_profit - coalesce(wr_net_loss, 0)) as profit + from web_sales left outer join web_returns on + (ws_item_sk = wr_item_sk and ws_order_number = wr_order_number), + date_dim, + web_site, + item, + promotion + where ws_sold_date_sk = d_date_sk + and d_date between cast('{YEAR}-08-{DAY}' as date) + and (cast('{YEAR}-08-{DAY}' as date) + interval '30' day) + and ws_web_site_sk = web_site_sk + and ws_item_sk = i_item_sk + and i_current_price > 50 + and ws_promo_sk = p_promo_sk + and p_channel_tv = 'N' + group by web_site_id) + select channel + , id + , sum(sales) as sales + , sum(returns_amt) as returns_amt + , sum(profit) as profit + from + (select 'store channel' as channel + , 'store' || store_id as id + , sales + , returns_amt + , profit + from ssr + union all + select 'catalog channel' as channel + , 'catalog_page' || catalog_page_id as id + , sales + , returns_amt + , profit + from csr + union all + select 'web channel' as channel + , 'web_site' || web_site_id as id + , sales + , returns_amt + , profit + from wsr + ) x + group by rollup(channel, id) + order by channel + ,id + limit 100""", + }, + 'parameter': + { + 'DAY': { + 'type': "integer", + 'range': [1,30] + }, + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q81", + 'query': """with customer_total_return as + (select cr_returning_customer_sk as ctr_customer_sk + ,ca_state as ctr_state, + sum(cr_return_amt_inc_tax) as ctr_total_return + from catalog_returns + ,date_dim + ,customer_address + where cr_returned_date_sk = d_date_sk + and d_year ={YEAR} + and cr_returning_addr_sk = ca_address_sk + group by cr_returning_customer_sk + ,ca_state ) + select c_customer_id,c_salutation,c_first_name,c_last_name,ca_street_number,ca_street_name + ,ca_street_type,ca_suite_number,ca_city,ca_county,ca_state,ca_zip,ca_country,ca_gmt_offset + ,ca_location_type,ctr_total_return + from customer_total_return ctr1 + ,customer_address + ,customer + where ctr1.ctr_total_return > (select avg(ctr_total_return)*1.2 + from customer_total_return ctr2 + where ctr1.ctr_state = ctr2.ctr_state) + and ca_address_sk = c_current_addr_sk + and ca_state = '{STATE}' + and ctr1.ctr_customer_sk = c_customer_sk + order by c_customer_id,c_salutation,c_first_name,c_last_name,ca_street_number,ca_street_name + ,ca_street_type,ca_suite_number,ca_city,ca_county,ca_state,ca_zip,ca_country,ca_gmt_offset + ,ca_location_type,ctr_total_return + limit 100""", + 'parameter': + { + 'STATE': { + 'type': "list", + 'range': 
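+ # How these entries are consumed, as a minimal sketch (function names are
+ # illustrative assumptions, not necessarily bexhoma's internal API): a
+ # 'list'-typed parameter draws one value from the pool that follows, an
+ # 'integer'-typed one draws from an inclusive range, and the result is
+ # substituted into the query text via str.format, so '{STATE}' above
+ # becomes e.g. 'CA':
+ #
+ #   import random
+ #   def draw(param):
+ #       if param['type'] == 'list':
+ #           return random.choice(param['range'])
+ #       lo, hi = param['range']          # 'integer': inclusive bounds
+ #       return random.randint(lo, hi)
+ #   query = template.format(STATE=draw(params['STATE']), YEAR=draw(params['YEAR']))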
["AK","AL","AR","AZ","CA","CO","CT","DE","FL","GA","HI","IA","ID","IL","IN","KS","KY","LA","ME","MI","MN","MO","MS","MT","NC","ND","NE","NJ","NM","NV","OH","OK","OR","PA","SC","SD","TN","TX","UT","VA","WA","WI","WV"] + }, + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q82", + 'query': """ select i_item_id + ,i_item_desc + ,i_current_price + from item, inventory, date_dim, store_sales + where i_current_price between {PRICE} and {PRICE}+30 + and inv_item_sk = i_item_sk + and d_date_sk=inv_date_sk + and d_date between cast('{YEAR}-{MONTH}-{DAY}' as date) and (cast('{YEAR}-{MONTH}-{DAY}' as date) + interval '60' day) + and i_manufact_id in ({MANUFACT_ID1},{MANUFACT_ID2},{MANUFACT_ID3},{MANUFACT_ID4}) + and inv_quantity_on_hand between 100 and 500 + and ss_item_sk = i_item_sk + group by i_item_id,i_item_desc,i_current_price + order by i_item_id + limit 100""", + 'parameter': + { + 'DAY': { + 'type': "integer", + 'range': [1,24] + }, + 'MANUFACT_ID': { + 'type': "integer", + 'size': 4, + 'range': [1,1000] + }, + 'MONTH': { + 'type': "integer", + 'range': [1,7] + }, + 'PRICE': { + 'type': "integer", + 'range': [0,90] + }, + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q83", + 'query': """with sr_items as + (select i_item_id item_id, + sum(sr_return_quantity) sr_item_qty + from store_returns, + item, + date_dim + where sr_item_sk = i_item_sk + and d_date in + (select d_date + from date_dim + where d_week_seq in + (select d_week_seq + from date_dim + where d_date in ('{YEAR}-{MONTH1}-{DAY1}','{YEAR}-{MONTH2}-{DAY2}','{YEAR}-{MONTH3}-{DAY3}'))) + and sr_returned_date_sk = d_date_sk + group by i_item_id), + cr_items as + (select i_item_id item_id, + sum(cr_return_quantity) cr_item_qty + from catalog_returns, + item, + date_dim + where cr_item_sk = i_item_sk + and d_date in + (select d_date + from date_dim + where d_week_seq in + (select d_week_seq + from date_dim + where d_date in ('{YEAR}-{MONTH1}-{DAY1}','{YEAR}-{MONTH2}-{DAY2}','{YEAR}-{MONTH3}-{DAY3}'))) + and cr_returned_date_sk = d_date_sk + group by i_item_id), + wr_items as + (select i_item_id item_id, + sum(wr_return_quantity) wr_item_qty + from web_returns, + item, + date_dim + where wr_item_sk = i_item_sk + and d_date in + (select d_date + from date_dim + where d_week_seq in + (select d_week_seq + from date_dim + where d_date in ('{YEAR}-{MONTH1}-{DAY1}','{YEAR}-{MONTH2}-{DAY2}','{YEAR}-{MONTH3}-{DAY3}'))) + and wr_returned_date_sk = d_date_sk + group by i_item_id) + select sr_items.item_id + ,sr_item_qty + ,100.0*sr_item_qty/(sr_item_qty+cr_item_qty+wr_item_qty)/3.0 sr_dev + ,cr_item_qty + ,100.0*cr_item_qty/(sr_item_qty+cr_item_qty+wr_item_qty)/3.0 cr_dev + ,wr_item_qty + ,100.0*wr_item_qty/(sr_item_qty+cr_item_qty+wr_item_qty)/3.0 wr_dev + ,(sr_item_qty+cr_item_qty+wr_item_qty)/3.0 average + from sr_items + ,cr_items + ,wr_items + where sr_items.item_id=cr_items.item_id + and sr_items.item_id=wr_items.item_id + order by sr_items.item_id + ,sr_item_qty + 
limit 100""", + 'parameter': + { + 'DAY1': { + 'type': "integer", + 'range': [1,24] + }, + 'DAY2': { + 'type': "integer", + 'range': [1,24] + }, + 'DAY3': { + 'type': "integer", + 'range': [1,24] + }, + 'MONTH1': { + 'type': "integer", + 'range': [1,7] + }, + 'MONTH2': { + 'type': "integer", + 'range': [8,10] + }, + 'MONTH3': { + 'type': "integer", + 'range': [11,11] + }, + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q84", + 'query': """ select c_customer_id as customer_id + , CONCAT(coalesce(c_last_name,'') , ', ' , coalesce(c_first_name,'')) as customername + from customer + ,customer_address + ,customer_demographics + ,household_demographics + ,income_band + ,store_returns + where ca_city = '{CITY}' + and c_current_addr_sk = ca_address_sk + and ib_lower_bound >= {INCOME} + and ib_upper_bound <= {INCOME} + 50000 + and ib_income_band_sk = hd_income_band_sk + and cd_demo_sk = c_current_cdemo_sk + and hd_demo_sk = c_current_hdemo_sk + and sr_cdemo_sk = cd_demo_sk + order by c_customer_id + limit 100""", + 'DBMS': { + 'MonetDB': """ select c_customer_id as customer_id + , coalesce(c_last_name,'') || ', ' || coalesce(c_first_name,'') as customername + from customer + ,customer_address + ,customer_demographics + ,household_demographics + ,income_band + ,store_returns + where ca_city = '{CITY}' + and c_current_addr_sk = ca_address_sk + and ib_lower_bound >= {INCOME} + and ib_upper_bound <= {INCOME} + 50000 + and ib_income_band_sk = hd_income_band_sk + and cd_demo_sk = c_current_cdemo_sk + and hd_demo_sk = c_current_hdemo_sk + and sr_cdemo_sk = cd_demo_sk + order by c_customer_id + limit 100""", + 'PostgreSQL': """ select c_customer_id as customer_id + , coalesce(c_last_name,'') || ', ' || coalesce(c_first_name,'') as customername + from customer + ,customer_address + ,customer_demographics + ,household_demographics + ,income_band + ,store_returns + where ca_city = '{CITY}' + and c_current_addr_sk = ca_address_sk + and ib_lower_bound >= {INCOME} + and ib_upper_bound <= {INCOME} + 50000 + and ib_income_band_sk = hd_income_band_sk + and cd_demo_sk = c_current_cdemo_sk + and hd_demo_sk = c_current_hdemo_sk + and sr_cdemo_sk = cd_demo_sk + order by c_customer_id + limit 100""", + }, + 'parameter': + { + 'CITY': { + 'type': "list", + 'range': ["Aberdeen","Acme","Adams","Adrian","Afton","Albany","Allentown","Allison","Alma","Alpha","Altamont","Amherst","Amity","Andover","Antioch","Appleton","Arcadia","Arcola","Argyle","Arlington","Armstrong","Arthur","Ashland","Ashley","Ashton","Athens","Avery","Avoca","Avon","Bailey","Baker","Barnes","Bartlett","Bath","Bay View","Bayside","Bayview","Bear Creek","Beech Grove","Beechwood","Belfast","Belleview","Belleville","Belmont","Bennett","Benton","Berea","Berlin","Bethany","Bethel","Bethesda","Bethlehem","Big Creek","Birmingham","Blaine","Blair","Blanchard","Bloomingdale","Blue Springs","Bolton","Boyd","Bradford","Bradley","Brandon","Brentwood","Bridgeport","Bristol","Brooklyn","Brooks","Brookville","Brookwood","Brownsville","Brunswick","Bryant","Buckhorn","Buckingham","Buena Vista","Buffalo","Bunker 
Hill","Burns","Burton","Butler","Byron","Caldwell","Caledonia","Calhoun","California","Cambridge","Camden","Camelot","Canaan","Carlisle","Carlton","Carpenter","Carter","Carthage","Cedar","Cedar Creek","Cedar Grove","Cedar Hill","Center","Center Point","Centerville","Chapel Hill","Charleston","Chatham","Chelsea","Cherokee","Cherry Grove","Cherry Valley","Chester","Chestnut Hill","Chestnut Ridge","Church Hill","Clearview","Clearwater","Clifford","Clifton","Climax","Clinton","Clyde","Coldwater","Colfax","Collinsville","Colonial Heights","Columbia","Columbus","Concord","Conway","Cooper","Cordova","Corinth","Cottonwood","Country Club Estates","Crawford","Crescent","Creston","Crestview","Crossroads","Crystal","Crystal Springs","Cuba","Cumberland","Cunningham","Curtis","Dale","Dallas","Darlington","Decatur","Deer Park","Deerfield","Delmar","Delta","Denmark","Denver","Derby","Dewey","Dover","Doyle","Duncan","Dunlap","Easton","Eastwood","Echo","Edgewater","Edgewood","Edwards","Egypt","Elba","Elgin","Elizabeth","Elkton","Ellisville","Ellsworth","Elm Grove","Elmwood","Empire","Enon","Enterprise","Eureka","Evans","Evansville","Evergreen","Fair Oaks","Fairbanks","Fairfax","Fairfield","Fairmont","Fairmount","Fairview","Farmersville","Farmington","Fayetteville","Ferguson","Ferndale","Fernwood","Fillmore","Fisher","Five Forks","Five Points","Flat Rock","Flatwoods","Flint","Flint Hill","Florence","Floyd","Forest","Forest Hills","Forest Park","Forestville","Foster","Four Points","Fowler","Fox","Frankfort","Franklin","Freedom","Freeman","Freeport","Fremont","Frenchtown","Friendship","Frogtown","Fulton","Galena","Gardner","Garfield","Garrison","Gary","Georgetown","Gilbert","Gilmore","Gladstone","Glencoe","Glendale","Glenville","Glenwood","Globe","Golden","Good Hope","Goshen","Grandview","Granite","Grant","Gravel Hill","Gray","Green Acres","Green Hill","Green Valley","Greenbrier","Greendale","Greenfield","Greenville","Greenwood","Griffin","Guilford","Gum Springs","Guthrie","Hamburg","Hamilton","Hampton","Hardy","Harmon","Harmony","Harper","Harris","Harrisburg","Hartland","Harvey","Hastings","Hawthorne","Hazelwood","Helena","Henry","Hidden Valley","Highland","Highland Park","Hillcrest","Hillsboro","Hillsdale","Hillside","Hilltop","Holiday Hills","Holland","Hollywood","Hopewell","Horton","Houston","Howell","Hubbard","Hunter","Huntington","Huntsville","Hurricane","Hyde Park","Indian Village","Ingleside","Jackson","Jacksonville","Jamestown","Jenkins","Jericho","Jerome","Jimtown","Johnson","Johnsonville","Johnstown","Jones","Jordan","Kelly","Kensington","Kent","Kimball","King","Kingston","Kirkland","Knollwood","La Grange","Lake Forest","Lake View","Lakeland","Lakeside","Lakeview","Lakeville","Lakewood","Lamont","Lancaster","Langdon","Laurel","Lawrence","Lawrenceville","Lebanon","Lee","Leesburg","Leesville","Leland","Lenox","Leon","Lewis","Lewisburg","Lewisville","Liberty","Lincoln","Linden","Lisbon","Little River","Littleton","Lodi","Lone Oak","Lone Pine","Lone Star","Long Branch","Longwood","Louisville","Lucas","Ludlow","Lynn","Macedonia","Macon","Manchester","Mansfield","Maple Grove","Maple Hill","Mapleton","Marietta","Marion","Marshall","Martin","Martinsville","Mason","Maxwell","Mayfield","Maywood","Meadowbrook","Mechanicsburg","Middletown","Midway","Milan","Milford","Millbrook","Milltown","Millwood","Milo","Mineral Springs","Monroe","Montague","Montezuma","Monticello","Montpelier","Montrose","Moore","Morgan","Morgantown","Morris","Morton","Mount Olive","Mount Pleasant","Mount Tabor","Mount Vernon","Mount 
Zion","Mountain View","Murphy","Murray","Nashville","Nebo","Needmore","New Boston","New Hope","New Salem","New Town","Newark","Newburg","Newport","Newton","Newtown","Nichols","Northwood","Norton","Norwood","Nottingham","Oak Grove","Oak Hill","Oak Ridge","Oakdale","Oakland","Oakley","Oakwood","Omega","Oneida","Orange","Owens","Page","Palmyra","Paradise","Parker","Parkwood","Patterson","Paxton","Payne","Peoria","Perkins","Perry","Peru","Philadelphia","Phillips","Phoenix","Pierce","Pine Grove","Pine Hill","Pine Ridge","Pine Valley","Pinecrest","Pineville","Piney Grove","Pinhook","Pioneer","Pisgah","Plainview","Plainville","Pleasant Grove","Pleasant Hill","Pleasant Valley","Point Pleasant","Pomona","Poplar Grove","Poplar Springs","Post Oak","Powell","Preston","Price","Proctor","Prospect","Prosperity","Providence","Pulaski","Pumpkin Center","Quincy","Randolph","Rankin","Raymond","Red Bank","Red Hill","Red Oak","Red Rock","Redland","Reno","Riceville","Richardson","Richfield","Richland","Richmond","Richville","Ridgeville","Ridgeway","Ridgewood","Riley","River Oaks","Riverdale","Riverside","Riverview","Roberts","Rochester","Rock Hill","Rock Springs","Rockford","Rockland","Rockwood","Rocky Point","Rolling Hills","Roscoe","Rose Hill","Rosebud","Roseville","Rosewood","Rossville","Roxbury","Roy","Royal","Ruby","Ruth","Rutland","Ryan","Saint Clair","Saint George","Saint James","Saint John","Saint Johns","Saint Paul","Salem","San Jose","Sand Hill","Sanford","Saratoga","Sardis","Sawyer","Scotland","Scottsville","Selma","Seneca","Shady Grove","Shamrock","Shannon","Sharon","Shaw","Shawnee","Sheffield","Shelby","Sheridan","Sherman","Sherwood Forest","Shiloh","Shore Acres","Sidney","Siloam","Silver City","Silver Creek","Silver Springs","Simpson","Slabtown","Sleepy Hollow","Smith","Smyrna","Snug Harbor","Somerset","Somerville","Spencer","Spring Grove","Spring Hill","Spring Lake","Spring Valley","Springdale","Springfield","Springhill","Springtown","Springville","Stafford","Star","State Line","Sterling","Stewart","Stony Point","Stratford","Stringtown","Sugar Hill","Sullivan","Sulphur Springs","Summerfield","Summerville","Summit","Sumner","Sunnyside","Sunrise","Sunset Beach","Sunshine","Superior","Sutton","Sycamore","Tabor","Taft","Tanglewood","Texas","The Meadows","Thomas","Thompson","Thompsonville","Three Forks","Tipton","Tracy","Tremont","Trenton","Trinity","Turner","Twin Oaks","Tyler","Tyrone","Union","Union City","Union Hill","Unionville","Unity","Utica","Valley View","Vance","Verona","Victoria","Vienna","Vista","Wakefield","Wallace","Walnut","Walnut Grove","Walton","Ward","Warwick","Washington Heights","Waterford","Waterloo","Waterville","Watkins","Wayland","Wayne","Webb","Welcome","Weldon","Wesley","West End","West Liberty","West Point","Westfield","Westgate","Westminster","Weston","Westport","Westville","Westwood","Wheatland","Whispering Pines","White City","White Hall","White Oak","White Plains","White Rock","Whitesville","Whitney","Wildwood","Willard","Williams","Williamsburg","Williamsville","Willis","Willow","Wilson","Wilton","Winchester","Winfield","Winona","Winslow","Wolf Creek","Woodbine","Woodbury","Woodcrest","Woodland","Woodland Hills","Woodland Park","Woodlawn","Woodrow","Woodruff","Woodside","Woodstock","Woodville","Wright","Wyoming","York","Yorktown","Youngstown"] + }, + 'INCOME': { + 'type': "integer", + 'range': [0,70000] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 
'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q85", + 'query': """ select substr(r_reason_desc,1,20) as reason + ,avg(ws_quantity) avg_ws_quantity + ,avg(wr_refunded_cash) avg_wr_refunded_cash + ,avg(wr_fee) avg_wr_fee + from web_sales, web_returns, web_page, customer_demographics cd1, + customer_demographics cd2, customer_address, date_dim, reason + where ws_web_page_sk = wp_web_page_sk + and ws_item_sk = wr_item_sk + and ws_order_number = wr_order_number + and ws_sold_date_sk = d_date_sk and d_year = {YEAR} + and cd1.cd_demo_sk = wr_refunded_cdemo_sk + and cd2.cd_demo_sk = wr_returning_cdemo_sk + and ca_address_sk = wr_refunded_addr_sk + and r_reason_sk = wr_reason_sk + and + ( + ( + cd1.cd_marital_status = '{MS1}' + and + cd1.cd_marital_status = cd2.cd_marital_status + and + cd1.cd_education_status = '{ES1}' + and + cd1.cd_education_status = cd2.cd_education_status + and + ws_sales_price between 100.00 and 150.00 + ) + or + ( + cd1.cd_marital_status = '{MS2}' + and + cd1.cd_marital_status = cd2.cd_marital_status + and + cd1.cd_education_status = '{ES2}' + and + cd1.cd_education_status = cd2.cd_education_status + and + ws_sales_price between 50.00 and 100.00 + ) + or + ( + cd1.cd_marital_status = '{MS3}' + and + cd1.cd_marital_status = cd2.cd_marital_status + and + cd1.cd_education_status = '{ES3}' + and + cd1.cd_education_status = cd2.cd_education_status + and + ws_sales_price between 150.00 and 200.00 + ) + ) + and + ( + ( + ca_country = 'United States' + and + ca_state in ('{STATE1}', '{STATE2}', '{STATE3}') + and ws_net_profit between 100 and 200 + ) + or + ( + ca_country = 'United States' + and + ca_state in ('{STATE4}', '{STATE5}', '{STATE6}') + and ws_net_profit between 150 and 300 + ) + or + ( + ca_country = 'United States' + and + ca_state in ('{STATE7}', '{STATE8}', '{STATE9}') + and ws_net_profit between 50 and 250 + ) + ) + group by r_reason_desc + order by substr(r_reason_desc,1,20) + ,avg(ws_quantity) + ,avg(wr_refunded_cash) + ,avg(wr_fee) + limit 100""", + 'parameter': + { + 'ES': { + 'type': "list", + 'size': 3, + 'range': ["Primary","Secondary","College","2 yr Degree","4 yr Degree", "Advanced Degree","Unknown"] + }, + 'MS': { + 'type': "list", + 'size': 3, + 'range': ["M","S","D","W","U"] + }, + 'STATE': { + 'type': "list", + 'size': 9, + 'range': ["AK","AL","AR","AZ","CA","CO","CT","DE","FL","GA","HI","IA","ID","IL","IN","KS","KY","LA","ME","MI","MN","MO","MS","MT","NC","ND","NE","NJ","NM","NV","OH","OK","OR","PA","SC","SD","TN","TX","UT","VA","WA","WI","WV"] + }, + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q86", + 'query': """ select + sum(ws_net_paid) as total_sum + ,i_category + ,i_class + ,grouping(i_category)+grouping(i_class) as lochierarchy + ,rank() over ( + partition by grouping(i_category)+grouping(i_class), + case when grouping(i_class) = 0 then i_category end + order by sum(ws_net_paid) is not null desc, sum(ws_net_paid) desc) as rank_within_parent + from + web_sales + ,date_dim d1 + ,item + where + d1.d_month_seq between {DMS} and {DMS}+11 + and d1.d_date_sk = ws_sold_date_sk + and i_item_sk = ws_item_sk + group by i_category,i_class with rollup + order by + lochierarchy desc, + case 
when lochierarchy = 0 then i_category is not null end, + case when lochierarchy = 0 then i_category end, + rank_within_parent + limit 100""", + 'DBMS': { + 'MariaDB': """SELECT * +FROM ( + SELECT + SUM(ws_net_paid) AS total_sum, + i_category, + i_class, + CASE WHEN i_category IS NULL THEN 1 ELSE 0 END + + CASE WHEN i_class IS NULL THEN 1 ELSE 0 END AS lochierarchy, + RANK() OVER ( + PARTITION BY + CASE WHEN i_category IS NULL THEN 1 ELSE 0 END + + CASE WHEN i_class IS NULL THEN 1 ELSE 0 END, + CASE WHEN i_class IS NOT NULL THEN i_category END + ORDER BY SUM(ws_net_paid) DESC + ) AS rank_within_parent + FROM + web_sales + JOIN date_dim AS d1 ON d1.d_date_sk = ws_sold_date_sk + JOIN item ON i_item_sk = ws_item_sk + WHERE + d1.d_month_seq BETWEEN {DMS} AND {DMS} + 11 + GROUP BY i_category, i_class WITH ROLLUP +) AS aggregated_data +ORDER BY + lochierarchy DESC, + CASE WHEN lochierarchy = 0 THEN i_category is not null END, + CASE WHEN lochierarchy = 0 THEN i_category END, + rank_within_parent +LIMIT 100""", + 'MonetDB': """with tmp as ( + select + sum(ws_net_paid) as total_sum + ,i_category + ,i_class + ,grouping(i_category)+grouping(i_class) as lochierarchy + ,rank() over ( + partition by grouping(i_category)+grouping(i_class), + case when grouping(i_class) = 0 then i_category end + order by sum(ws_net_paid) desc) as rank_within_parent + from + web_sales + ,date_dim d1 + ,item + where + d1.d_month_seq between {DMS} and {DMS}+11 + and d1.d_date_sk = ws_sold_date_sk + and i_item_sk = ws_item_sk + group by rollup(i_category,i_class) + ) + select * from tmp + order by + lochierarchy desc, + case when lochierarchy = 0 then i_category is not null end, + case when lochierarchy = 0 then i_category end, + rank_within_parent + limit 100""", + 'PostgreSQL': """ select + sum(ws_net_paid) as total_sum + ,i_category + ,i_class + ,grouping(i_category)+grouping(i_class) as lochierarchy + ,rank() over ( + partition by grouping(i_category)+grouping(i_class), + case when grouping(i_class) = 0 then i_category end + order by sum(ws_net_paid) desc) as rank_within_parent + from + web_sales + ,date_dim d1 + ,item + where + d1.d_month_seq between {DMS} and {DMS}+11 + and d1.d_date_sk = ws_sold_date_sk + and i_item_sk = ws_item_sk + group by rollup(i_category,i_class) + order by + lochierarchy desc, + case when grouping(i_category)+grouping(i_class) = 0 then i_category is not null end, + case when grouping(i_category)+grouping(i_class) = 0 then i_category end, + rank_within_parent + limit 100""", + 'Exasol': """ select + sum(ws_net_paid) as total_sum + ,i_category + ,i_class + ,grouping(i_category)+grouping(i_class) as lochierarchy + ,rank() over ( + partition by grouping(i_category)+grouping(i_class), + case when grouping(i_class) = 0 then i_category end + order by sum(ws_net_paid) desc) as rank_within_parent + from + web_sales + ,date_dim d1 + ,item + where + d1.d_month_seq between {DMS} and {DMS}+11 + and d1.d_date_sk = ws_sold_date_sk + and i_item_sk = ws_item_sk + group by rollup(i_category,i_class) + order by + lochierarchy desc, + case when lochierarchy = 0 then i_category end, + rank_within_parent + limit 100""", + 'MemSQL': """ select + sum(ws_net_paid) as total_sum + ,i_category + ,i_class + ,grouping(i_category)+grouping(i_class) as lochierarchy + ,rank() over ( + partition by grouping(i_category)+grouping(i_class), + case when grouping(i_class) = 0 then i_category end + order by sum(ws_net_paid) desc) as rank_within_parent + from + web_sales + ,date_dim d1 + ,item + where + d1.d_month_seq between {DMS} and 
{DMS}+11 + and d1.d_date_sk = ws_sold_date_sk + and i_item_sk = ws_item_sk + group by rollup(i_category,i_class) + order by + lochierarchy desc, + case when lochierarchy = 0 then i_category end, + rank_within_parent + limit 100""", + }, + 'parameter': + { + 'DMS': { + 'type': "integer", + 'range': [1176,1224] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q87", + 'query': """select count(*) as counter + from ((select distinct c_last_name, c_first_name, d_date + from store_sales, date_dim, customer + where store_sales.ss_sold_date_sk = date_dim.d_date_sk + and store_sales.ss_customer_sk = customer.c_customer_sk + and d_month_seq between {DMS} and {DMS}+11) + except + (select distinct c_last_name, c_first_name, d_date + from catalog_sales, date_dim, customer + where catalog_sales.cs_sold_date_sk = date_dim.d_date_sk + and catalog_sales.cs_bill_customer_sk = customer.c_customer_sk + and d_month_seq between {DMS} and {DMS}+11) + except + (select distinct c_last_name, c_first_name, d_date + from web_sales, date_dim, customer + where web_sales.ws_sold_date_sk = date_dim.d_date_sk + and web_sales.ws_bill_customer_sk = customer.c_customer_sk + and d_month_seq between {DMS} and {DMS}+11) + ) cool_cust + """, + 'parameter': + { + 'DMS': { + 'type': "integer", + 'range': [1176,1224] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q88", + 'query': """select * + from + (select count(*) h8_30_to_9 + from store_sales, household_demographics , time_dim, store + where ss_sold_time_sk = time_dim.t_time_sk + and ss_hdemo_sk = household_demographics.hd_demo_sk + and ss_store_sk = s_store_sk + and time_dim.t_hour = 8 + and time_dim.t_minute >= 30 + and ((household_demographics.hd_dep_count = {HOUR1} and household_demographics.hd_vehicle_count<={HOUR1}+2) or + (household_demographics.hd_dep_count = {HOUR2} and household_demographics.hd_vehicle_count<={HOUR2}+2) or + (household_demographics.hd_dep_count = {HOUR3} and household_demographics.hd_vehicle_count<={HOUR3}+2)) + and store.s_store_name = 'ese') s1, + (select count(*) h9_to_9_30 + from store_sales, household_demographics , time_dim, store + where ss_sold_time_sk = time_dim.t_time_sk + and ss_hdemo_sk = household_demographics.hd_demo_sk + and ss_store_sk = s_store_sk + and time_dim.t_hour = 9 + and time_dim.t_minute < 30 + and ((household_demographics.hd_dep_count = {HOUR1} and household_demographics.hd_vehicle_count<={HOUR1}+2) or + (household_demographics.hd_dep_count = {HOUR2} and household_demographics.hd_vehicle_count<={HOUR2}+2) or + (household_demographics.hd_dep_count = {HOUR3} and household_demographics.hd_vehicle_count<={HOUR3}+2)) + and store.s_store_name = 'ese') s2, + (select count(*) h9_30_to_10 + from store_sales, household_demographics , time_dim, store + where ss_sold_time_sk = time_dim.t_time_sk + and ss_hdemo_sk = household_demographics.hd_demo_sk + and ss_store_sk = s_store_sk + and time_dim.t_hour = 9 + and time_dim.t_minute >= 30 + and ((household_demographics.hd_dep_count = {HOUR1} and household_demographics.hd_vehicle_count<={HOUR1}+2) or + 
(household_demographics.hd_dep_count = {HOUR2} and household_demographics.hd_vehicle_count<={HOUR2}+2) or + (household_demographics.hd_dep_count = {HOUR3} and household_demographics.hd_vehicle_count<={HOUR3}+2)) + and store.s_store_name = 'ese') s3, + (select count(*) h10_to_10_30 + from store_sales, household_demographics , time_dim, store + where ss_sold_time_sk = time_dim.t_time_sk + and ss_hdemo_sk = household_demographics.hd_demo_sk + and ss_store_sk = s_store_sk + and time_dim.t_hour = 10 + and time_dim.t_minute < 30 + and ((household_demographics.hd_dep_count = {HOUR1} and household_demographics.hd_vehicle_count<={HOUR1}+2) or + (household_demographics.hd_dep_count = {HOUR2} and household_demographics.hd_vehicle_count<={HOUR2}+2) or + (household_demographics.hd_dep_count = {HOUR3} and household_demographics.hd_vehicle_count<={HOUR3}+2)) + and store.s_store_name = 'ese') s4, + (select count(*) h10_30_to_11 + from store_sales, household_demographics , time_dim, store + where ss_sold_time_sk = time_dim.t_time_sk + and ss_hdemo_sk = household_demographics.hd_demo_sk + and ss_store_sk = s_store_sk + and time_dim.t_hour = 10 + and time_dim.t_minute >= 30 + and ((household_demographics.hd_dep_count = {HOUR1} and household_demographics.hd_vehicle_count<={HOUR1}+2) or + (household_demographics.hd_dep_count = {HOUR2} and household_demographics.hd_vehicle_count<={HOUR2}+2) or + (household_demographics.hd_dep_count = {HOUR3} and household_demographics.hd_vehicle_count<={HOUR3}+2)) + and store.s_store_name = 'ese') s5, + (select count(*) h11_to_11_30 + from store_sales, household_demographics , time_dim, store + where ss_sold_time_sk = time_dim.t_time_sk + and ss_hdemo_sk = household_demographics.hd_demo_sk + and ss_store_sk = s_store_sk + and time_dim.t_hour = 11 + and time_dim.t_minute < 30 + and ((household_demographics.hd_dep_count = {HOUR1} and household_demographics.hd_vehicle_count<={HOUR1}+2) or + (household_demographics.hd_dep_count = {HOUR2} and household_demographics.hd_vehicle_count<={HOUR2}+2) or + (household_demographics.hd_dep_count = {HOUR3} and household_demographics.hd_vehicle_count<={HOUR3}+2)) + and store.s_store_name = 'ese') s6, + (select count(*) h11_30_to_12 + from store_sales, household_demographics , time_dim, store + where ss_sold_time_sk = time_dim.t_time_sk + and ss_hdemo_sk = household_demographics.hd_demo_sk + and ss_store_sk = s_store_sk + and time_dim.t_hour = 11 + and time_dim.t_minute >= 30 + and ((household_demographics.hd_dep_count = {HOUR1} and household_demographics.hd_vehicle_count<={HOUR1}+2) or + (household_demographics.hd_dep_count = {HOUR2} and household_demographics.hd_vehicle_count<={HOUR2}+2) or + (household_demographics.hd_dep_count = {HOUR3} and household_demographics.hd_vehicle_count<={HOUR3}+2)) + and store.s_store_name = 'ese') s7, + (select count(*) h12_to_12_30 + from store_sales, household_demographics , time_dim, store + where ss_sold_time_sk = time_dim.t_time_sk + and ss_hdemo_sk = household_demographics.hd_demo_sk + and ss_store_sk = s_store_sk + and time_dim.t_hour = 12 + and time_dim.t_minute < 30 + and ((household_demographics.hd_dep_count = {HOUR1} and household_demographics.hd_vehicle_count<={HOUR1}+2) or + (household_demographics.hd_dep_count = {HOUR2} and household_demographics.hd_vehicle_count<={HOUR2}+2) or + (household_demographics.hd_dep_count = {HOUR3} and household_demographics.hd_vehicle_count<={HOUR3}+2)) + and store.s_store_name = 'ese') s8 + """, + 'parameter': + { + 'HOUR': { + 'type': "integer", + 'size': 3, + 'range': 
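+ # 'size': 3 expands this single definition into the placeholders {HOUR1},
+ # {HOUR2} and {HOUR3} used in the query above. (Despite the name, these
+ # values parameterize hd_dep_count.) A minimal sketch, assuming independent
+ # draws; the real generator may enforce distinct values:
+ #
+ #   import random
+ #   def draw_sized(name, param):
+ #       lo, hi = param['range']
+ #       # e.g. {'HOUR1': 2, 'HOUR2': -1, 'HOUR3': 4}
+ #       return {'{}{}'.format(name, i): random.randint(lo, hi)
+ #               for i in range(1, param.get('size', 1) + 1)}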
[-1,4] + }, + 'STORE': { + 'type': "list", + 'range': ["Unknown"] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q89", + 'query': """select * + from( + select i_category, i_class, i_brand, + s_store_name, s_company_name, + d_moy, + sum(ss_sales_price) sum_sales, + avg(sum(ss_sales_price)) over + (partition by i_category, i_brand, s_store_name, s_company_name) + avg_monthly_sales + from item, store_sales, date_dim, store + where ss_item_sk = i_item_sk and + ss_sold_date_sk = d_date_sk and + ss_store_sk = s_store_sk and + d_year in ({YEAR}) and + ((i_category in ('{CAT1}','{CAT2}','{CAT3}') and + i_class in ('{CLASS1}','{CLASS2}','{CLASS3}') + ) + or (i_category in ('{CAT4}','{CAT5}','{CAT6}') and + i_class in ('{CLASS4}','{CLASS5}','{CLASS6}') + )) + group by i_category, i_class, i_brand, + s_store_name, s_company_name, d_moy) tmp1 + where case when (avg_monthly_sales <> 0) then (abs(sum_sales - avg_monthly_sales) / avg_monthly_sales) else null end > 0.1 + order by sum_sales - avg_monthly_sales, s_store_name + limit 100""", + 'parameter': + { + 'CAT': { + 'type': "list", + 'size': 6, + 'range': ["Books","Children","Electronics","Home","Jewelry","Men","Music","Shoes","Sports","Women"] + }, + 'CLASS': { + 'type': "list", + 'size': 6, + 'range': ["accent","accessories","archery","arts","athletic","athletic","shoes","audio","automotive","baseball","basketball","bathroom","bedding","birdal","blinds/shades","bracelets","business","camcorders","cameras","camping","classical","computers","consignment","cooking","costume","country","curtains/drapes","custom","decor","diamonds","disk","drives","dresses","dvd/vcr","players","earings","entertainments","estate","fiction","fishing","fitness","flatware","football","fragrances","furniture","glassware","gold","golf","guns","history","hockey","home","repair","infants","jewelry","boxes","karoke","kids","lighting","loose","stones","maternity","mattresses","memory","mens","mens","watch","monitors","musical","mystery","newborn","optics","outdoor","paint","pants","parenting","pendants","personal","pools","pop","portable","reference","rings","rock","romance","rugs","sailing","scanners","school-uniforms","science","self-help","semi-precious","shirts","sports","sports-apparel","stereo","swimwear","tables","televisions","tennis","toddlers","travel","wallpaper","wireless","womens","womens watch"] + }, + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q90", + 'query': """select cast(amc as decimal(15,4))/cast(pmc as decimal(15,4)) am_pm_ratio + from ( select count(*) amc + from web_sales, household_demographics , time_dim, web_page + where ws_sold_time_sk = time_dim.t_time_sk + and ws_ship_hdemo_sk = household_demographics.hd_demo_sk + and ws_web_page_sk = web_page.wp_web_page_sk + and time_dim.t_hour between {HOUR_AM} and {HOUR_AM}+1 + and household_demographics.hd_dep_count = {DEPCNT} + and web_page.wp_char_count between 5000 and 5200) at, + ( select count(*) pmc + from web_sales, household_demographics , time_dim, web_page + where 
ws_sold_time_sk = time_dim.t_time_sk + and ws_ship_hdemo_sk = household_demographics.hd_demo_sk + and ws_web_page_sk = web_page.wp_web_page_sk + and time_dim.t_hour between {HOUR_PM} and {HOUR_PM}+1 + and household_demographics.hd_dep_count = {DEPCNT} + and web_page.wp_char_count between 5000 and 5200) pt + order by am_pm_ratio + limit 100""", + 'parameter': + { + 'DEPCNT': { + 'type': "integer", + 'range': [0,9] + }, + 'HOUR_AM': { + 'type': "integer", + 'range': [6,12] + }, + 'HOUR_PM': { + 'type': "integer", + 'range': [13,21] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q91", + 'query': """select + cc_call_center_id Call_Center, + cc_name Call_Center_Name, + cc_manager Manager, + sum(cr_net_loss) Returns_Loss + from + call_center, + catalog_returns, + date_dim, + customer, + customer_address, + customer_demographics, + household_demographics + where + cr_call_center_sk = cc_call_center_sk + and cr_returned_date_sk = d_date_sk + and cr_returning_customer_sk= c_customer_sk + and cd_demo_sk = c_current_cdemo_sk + and hd_demo_sk = c_current_hdemo_sk + and ca_address_sk = c_current_addr_sk + and d_year = {YEAR} + and d_moy = {MONTH} + and ( (cd_marital_status = 'M' and cd_education_status = 'Unknown') + or(cd_marital_status = 'W' and cd_education_status = 'Advanced Degree')) + and hd_buy_potential like '{BUY_POTENTIAL}%' + and ca_gmt_offset = {GMT} + group by cc_call_center_id,cc_name,cc_manager,cd_marital_status,cd_education_status + order by sum(cr_net_loss) desc""", + 'parameter': + { + 'BUY_POTENTIAL': { + 'type': "list", + 'range': ["1001-5000",">10000","501-1000","0-500","Unknown","5001-10000"] + }, + 'GMT': { + 'type': "list", + 'range': ["-6","-7"] + }, + 'MONTH': { + 'type': "integer", + 'range': [11,12] + }, + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q92", + 'query': """ select + sum(ws_ext_discount_amt) as Excess_Discount_Amount + from + web_sales + ,item + ,date_dim + where + i_manufact_id = {IMID} + and i_item_sk = ws_item_sk + and d_date between '{YEAR}-{MONTH}-01' and + (cast('{YEAR}-{MONTH}-01' as date) + interval '90' day) + and d_date_sk = ws_sold_date_sk + and ws_ext_discount_amt + > ( + SELECT + 1.3 * avg(ws_ext_discount_amt) + FROM + web_sales + ,date_dim + WHERE + ws_item_sk = i_item_sk + and d_date between '{YEAR}-{MONTH}-01' and + (cast('{YEAR}-{MONTH}-01' as date) + interval '90' day) + and d_date_sk = ws_sold_date_sk + ) + order by sum(ws_ext_discount_amt) + limit 100""", + 'parameter': + { + 'IMID': { + 'type': "integer", + 'range': [1,1000] + }, + 'MONTH': { + 'type': "integer", + 'range': [1,4] + }, + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q93", + 'query': """with total as (select ss_customer_sk + ,sum(act_sales) sumsales + from 
(select ss_item_sk + ,ss_ticket_number + ,ss_customer_sk + ,case when sr_return_quantity is not null then (ss_quantity-sr_return_quantity)*ss_sales_price + else (ss_quantity*ss_sales_price) end act_sales + from store_sales left outer join store_returns on (sr_item_sk = ss_item_sk + and sr_ticket_number = ss_ticket_number) + ,reason + where sr_reason_sk = r_reason_sk + and r_reason_desc = '{REASON}') t + group by ss_customer_sk) + select * from total + order by sumsales is not null, sumsales, ss_customer_sk + limit 100""", + 'parameter': + { + 'REASON': { + 'type': "list", + 'range': ["Package was damaged","Stopped working","Did not get it on time","Not the product that was ordred","Parts missing","Does not work with a product that I have","Gift exchange","Did not like the color","Did not like the model","Did not like the make","Did not like the warranty","No service location in my area","Found a better price in a store","Found a better extended warranty in a store","Not working any more","Did not fit","Wrong size","Lost my job","unauthoized purchase","duplicate purchase","its is a boy","it is a girl","reason 23","reason 24","reason 25","reason 26","reason 27","reason 28","reason 29","reason 31","reason 32","reason 33","reason 34","reason 35"] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q94", + 'query': """ select + count(distinct ws_order_number) as "order count" + ,sum(ws_ext_ship_cost) as "total shipping cost" + ,sum(ws_net_profit) as "total net profit" + from + web_sales ws1 + ,date_dim + ,customer_address + ,web_site + where + d_date between '{YEAR}-{MONTH}-01' and + (cast('{YEAR}-{MONTH}-01' as date) + interval '60' day) + and ws1.ws_ship_date_sk = d_date_sk + and ws1.ws_ship_addr_sk = ca_address_sk + and ca_state = '{STATE}' + and ws1.ws_web_site_sk = web_site_sk + and web_company_name = 'pri' + and exists (select * + from web_sales ws2 + where ws1.ws_order_number = ws2.ws_order_number + and ws1.ws_warehouse_sk <> ws2.ws_warehouse_sk) + and not exists(select * + from web_returns wr1 + where ws1.ws_order_number = wr1.wr_order_number) + order by count(distinct ws_order_number) + limit 100""", + 'parameter': + { + 'MONTH': { + 'type': "integer", + 'range': [2,5] + }, + 'STATE': { + 'type': "list", + 'range': ["AK","AL","AR","AZ","CA","CO","CT","DE","FL","GA","HI","IA","ID","IL","IN","KS","KY","LA","ME","MI","MN","MO","MS","MT","NC","ND","NE","NJ","NM","NV","OH","OK","OR","PA","SC","SD","TN","TX","UT","VA","WA","WI","WV"] + }, + 'YEAR': { + 'type': "integer", + 'range': [1999,2002] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q95", + 'query': """with ws_wh as + (select ws1.ws_order_number,ws1.ws_warehouse_sk wh1,ws2.ws_warehouse_sk wh2 + from web_sales ws1,web_sales ws2 + where ws1.ws_order_number = ws2.ws_order_number + and ws1.ws_warehouse_sk <> ws2.ws_warehouse_sk) + select + count(distinct ws_order_number) as "order count" + ,sum(ws_ext_ship_cost) as "total shipping cost" + ,sum(ws_net_profit) as "total net profit" + from + web_sales ws1 + ,date_dim + ,customer_address + ,web_site + where + d_date between 
'{YEAR}-{MONTH}-01' and + (cast('{YEAR}-{MONTH}-01' as date) + interval '60' day) + and ws1.ws_ship_date_sk = d_date_sk + and ws1.ws_ship_addr_sk = ca_address_sk + and ca_state = '{STATE}' + and ws1.ws_web_site_sk = web_site_sk + and web_company_name = 'pri' + and ws1.ws_order_number in (select ws_order_number + from ws_wh) + and ws1.ws_order_number in (select wr_order_number + from web_returns,ws_wh + where wr_order_number = ws_wh.ws_order_number) + order by count(distinct ws_order_number) + limit 100""", + 'parameter': + { + 'MONTH': { + 'type': "integer", + 'range': [2,5] + }, + 'STATE': { + 'type': "list", + 'range': ["AK","AL","AR","AZ","CA","CO","CT","DE","FL","GA","HI","IA","ID","IL","IN","KS","KY","LA","ME","MI","MN","MO","MS","MT","NC","ND","NE","NJ","NM","NV","OH","OK","OR","PA","SC","SD","TN","TX","UT","VA","WA","WI","WV"] + }, + 'YEAR': { + 'type': "integer", + 'range': [1999,2002] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q96", + 'query': """ select count(*) counter + from store_sales + ,household_demographics + ,time_dim, store + where ss_sold_time_sk = time_dim.t_time_sk + and ss_hdemo_sk = household_demographics.hd_demo_sk + and ss_store_sk = s_store_sk + and time_dim.t_hour = {HOUR} + and time_dim.t_minute >= 30 + and household_demographics.hd_dep_count = {DEPCNT} + and store.s_store_name = 'ese' + order by count(*) + limit 100""", + 'parameter': + { + 'DEPCNT': { + 'type': "integer", + 'range': [0,9] + }, + 'HOUR': { + 'type': "list", + 'range': ["20","15","16","8"] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q97", + 'query': """with ssci as ( + select ss_customer_sk customer_sk + ,ss_item_sk item_sk + from store_sales,date_dim + where ss_sold_date_sk = d_date_sk + and d_month_seq between {DMS} and {DMS} + 11 + group by ss_customer_sk + ,ss_item_sk), + csci as( + select cs_bill_customer_sk customer_sk + ,cs_item_sk item_sk + from catalog_sales,date_dim + where cs_sold_date_sk = d_date_sk + and d_month_seq between {DMS} and {DMS} + 11 + group by cs_bill_customer_sk + ,cs_item_sk) + select sum(case when ssci.customer_sk is not null and csci.customer_sk is null then 1 else 0 end) store_only + ,sum(case when ssci.customer_sk is null and csci.customer_sk is not null then 1 else 0 end) catalog_only + ,sum(case when ssci.customer_sk is not null and csci.customer_sk is not null then 1 else 0 end) store_and_catalog + from ssci full outer join csci on (ssci.customer_sk=csci.customer_sk + and ssci.item_sk = csci.item_sk) + limit 100""", + 'DBMS': { + 'MySQL': """WITH ssci AS ( + SELECT + ss_customer_sk AS customer_sk, + ss_item_sk AS item_sk + FROM store_sales, date_dim + WHERE ss_sold_date_sk = d_date_sk + AND d_month_seq BETWEEN {DMS} AND {DMS} + 11 + GROUP BY ss_customer_sk, ss_item_sk + ), + csci AS ( + SELECT + cs_bill_customer_sk AS customer_sk, + cs_item_sk AS item_sk + FROM catalog_sales, date_dim + WHERE cs_sold_date_sk = d_date_sk + AND d_month_seq BETWEEN {DMS} AND {DMS} + 11 + GROUP BY cs_bill_customer_sk, cs_item_sk + ), + combined AS ( + SELECT + customer_sk, + item_sk, + 1 AS is_store, + 
0 AS is_catalog + FROM ssci + WHERE customer_sk IS NOT NULL AND item_sk IS NOT NULL + UNION ALL + SELECT + customer_sk, + item_sk, + 0 AS is_store, + 1 AS is_catalog + FROM csci + WHERE customer_sk IS NOT NULL AND item_sk IS NOT NULL + ) + SELECT + SUM(CASE WHEN is_store = 1 AND is_catalog = 0 THEN 1 ELSE 0 END) AS store_only, + SUM(CASE WHEN is_store = 0 AND is_catalog = 1 THEN 1 ELSE 0 END) AS catalog_only, + SUM(CASE WHEN is_store = 1 AND is_catalog = 1 THEN 1 ELSE 0 END) AS store_and_catalog + FROM ( + SELECT + customer_sk, + item_sk, + MAX(is_store) AS is_store, + MAX(is_catalog) AS is_catalog + FROM combined + GROUP BY customer_sk, item_sk + ) AS summary + LIMIT 100""", + }, + 'parameter': + { + 'DMS': { + 'type': "integer", + 'range': [1176,1224] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q98", + 'query': """select i_item_id + ,i_item_desc + ,i_category + ,i_class + ,i_current_price + ,sum(ss_ext_sales_price) as itemrevenue + ,sum(ss_ext_sales_price)*100./sum(sum(ss_ext_sales_price)) over + (partition by i_class) as revenueratio + from + store_sales + ,item + ,date_dim + where + ss_item_sk = i_item_sk + and i_category in ('{CATEGORY1}', '{CATEGORY2}', '{CATEGORY3}') + and ss_sold_date_sk = d_date_sk + and d_date between cast('{YEAR}-{MONTH}-01' as date) + and (cast('{YEAR}-{MONTH}-01' as date) + interval '30' day) + group by + i_item_id + ,i_item_desc + ,i_category + ,i_class + ,i_current_price + order by + i_category + ,i_class + ,i_item_id + ,i_item_desc + ,revenueratio""", + 'parameter': + { + 'CATEGORY': { + 'type': "list", + 'size': 3, + 'range': ["Books","Children","Electronics","Home","Jewelry","Men","Music","Shoes","Sports","Women"] + }, + 'MONTH': { + 'type': "integer", + 'range': [1,7] + }, + 'YEAR': { + 'type': "integer", + 'range': [1998,2002] + } + }, + 'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + }, + { + 'title': "TPC-DS Q99", + 'query': """select + substr(w_warehouse_name,1,20) as warehouse_name + ,sm_type + ,cc_name + ,sum(case when (cs_ship_date_sk - cs_sold_date_sk <= 30 ) then 1 else 0 end) as "30 days" + ,sum(case when (cs_ship_date_sk - cs_sold_date_sk > 30) and + (cs_ship_date_sk - cs_sold_date_sk <= 60) then 1 else 0 end ) as "31-60 days" + ,sum(case when (cs_ship_date_sk - cs_sold_date_sk > 60) and + (cs_ship_date_sk - cs_sold_date_sk <= 90) then 1 else 0 end) as "61-90 days" + ,sum(case when (cs_ship_date_sk - cs_sold_date_sk > 90) and + (cs_ship_date_sk - cs_sold_date_sk <= 120) then 1 else 0 end) as "91-120 days" + ,sum(case when (cs_ship_date_sk - cs_sold_date_sk > 120) then 1 else 0 end) as ">120 days" + from + catalog_sales + ,warehouse + ,ship_mode + ,call_center + ,date_dim + where + d_month_seq between {DMS} and {DMS} + 11 + and cs_ship_date_sk = d_date_sk + and cs_warehouse_sk = w_warehouse_sk + and cs_ship_mode_sk = sm_ship_mode_sk + and cs_call_center_sk = cc_call_center_sk + group by + substr(w_warehouse_name,1,20) + ,sm_type + ,cc_name + order by substr(w_warehouse_name,1,20) + ,sm_type + ,cc_name + limit 100""", + 'parameter': + { + 'DMS': { + 'type': "integer", + 'range': [1176,1224] + } + }, + 
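+ # The per-query knobs repeated throughout this file, summarized (hedged):
+ # 'active' toggles the query, 'numRun' is the number of timed executions,
+ # and 'numWarmup'/'numCooldown' are leading/trailing runs excluded from the
+ # statistics; with numRun=1 and both set to 0, every TPC-DS query here is
+ # timed on a single execution per connection.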
'active': True, + 'numWarmup': 0, + 'numCooldown': 0, + 'numRun': 1, + 'timer': + { + 'datatransfer': + { + 'active': True, + 'sorted': True, + 'compare': 'result', + 'store': [], + 'precision': 0, + }, + 'connection': + { + 'active': True, + } + } + } + ] +} \ No newline at end of file diff --git a/images/evaluator_dbmsbenchmarker/Dockerfile_template b/images/evaluator_dbmsbenchmarker/Dockerfile_template index cf8f0e500..4e52d43ba 100644 --- a/images/evaluator_dbmsbenchmarker/Dockerfile_template +++ b/images/evaluator_dbmsbenchmarker/Dockerfile_template @@ -1,14 +1,53 @@ -FROM python:3.12.5 +#FROM python:3.12.5 +FROM alpine:3.15 # does not compile numpy correctly # FROM python:3.13-rc-slim +# Set environment variables for Python installation +ENV PYTHON_VERSION=3.12.8 +ENV PYTHON_SRC=https://www.python.org/ftp/python/$PYTHON_VERSION/Python-$PYTHON_VERSION.tgz + +# Install necessary packages +RUN apk add --no-cache \ + build-base \ + linux-headers \ + libffi-dev \ + openssl-dev \ + zlib-dev \ + bzip2-dev \ + xz-dev \ + readline-dev \ + sqlite-dev \ + ncurses-dev \ + util-linux-dev \ + libressl-dev \ + tar \ + curl + +# Download, extract, and compile Python +RUN curl -fSL $PYTHON_SRC -o /tmp/Python.tgz && \ + tar -xzf /tmp/Python.tgz -C /tmp && \ + cd /tmp/Python-$PYTHON_VERSION && \ + ./configure --enable-optimizations && \ + make && \ + make install && \ + rm -rf /tmp/Python.tgz /tmp/Python-$PYTHON_VERSION + +# Verify Python installation +RUN python3.12 --version && pip3 --version + +# Set default python and pip to Python 3.12 +RUN ln -sf /usr/local/bin/python3.12 /usr/local/bin/python && \ + ln -sf /usr/local/bin/pip3 /usr/local/bin/pip + WORKDIR /usr/src/app -RUN apt update -RUN apt install default-jre -y -RUN apt install zip -y -RUN apt install nano -y +# Update package index and install necessary packages +RUN apk update && apk add --no-cache \ + openjdk17-jre \ + zip \ + nano # only needed in slim releases # RUN apt install git -y @@ -16,13 +55,19 @@ RUN apt install nano -y RUN mkdir /results +RUN apk add --no-cache bash + SHELL ["/bin/bash", "-c"] ENV VIRTUAL_ENV=/opt/venv -RUN python3 -m pip install --user virtualenv -RUN python3 -m venv $VIRTUAL_ENV + +# CMD sleep3600 + +RUN python -m pip install virtualenv +RUN python -m venv $VIRTUAL_ENV ENV PATH="$VIRTUAL_ENV/bin:$PATH" +RUN apk add --no-cache git RUN JAVA_HOME=/usr/lib/jvm/java-1.8.0/ pip install --no-cache-dir --upgrade --force-reinstall git+https://github.com/Beuth-Erdelt/DBMS-Benchmarker@{version} RUN pip install jupyter diff --git a/images/evaluator_dbmsbenchmarker/Dockerfile_v0.14.6 b/images/evaluator_dbmsbenchmarker/Dockerfile_v0.14.6 index 32fabdde0..a30402fad 100644 --- a/images/evaluator_dbmsbenchmarker/Dockerfile_v0.14.6 +++ b/images/evaluator_dbmsbenchmarker/Dockerfile_v0.14.6 @@ -1,14 +1,53 @@ -FROM python:3.12.5 +#FROM python:3.12.5 +FROM alpine:3.15 # does not compile numpy correctly # FROM python:3.13-rc-slim +# Set environment variables for Python installation +ENV PYTHON_VERSION=3.12.8 +ENV PYTHON_SRC=https://www.python.org/ftp/python/$PYTHON_VERSION/Python-$PYTHON_VERSION.tgz + +# Install necessary packages +RUN apk add --no-cache \ + build-base \ + linux-headers \ + libffi-dev \ + openssl-dev \ + zlib-dev \ + bzip2-dev \ + xz-dev \ + readline-dev \ + sqlite-dev \ + ncurses-dev \ + util-linux-dev \ + libressl-dev \ + tar \ + curl + +# Download, extract, and compile Python +RUN curl -fSL $PYTHON_SRC -o /tmp/Python.tgz && \ + tar -xzf /tmp/Python.tgz -C /tmp && \ + cd /tmp/Python-$PYTHON_VERSION && \ + 
./configure --enable-optimizations && \ + make && \ + make install && \ + rm -rf /tmp/Python.tgz /tmp/Python-$PYTHON_VERSION + +# Verify Python installation +RUN python3.12 --version && pip3 --version + +# Set default python and pip to Python 3.12 +RUN ln -sf /usr/local/bin/python3.12 /usr/local/bin/python && \ + ln -sf /usr/local/bin/pip3 /usr/local/bin/pip + WORKDIR /usr/src/app -RUN apt update -RUN apt install default-jre -y -RUN apt install zip -y -RUN apt install nano -y +# Update package index and install necessary packages +RUN apk update && apk add --no-cache \ + openjdk17-jre \ + zip \ + nano # only needed in slim releases # RUN apt install git -y @@ -16,13 +55,19 @@ RUN apt install nano -y RUN mkdir /results +RUN apk add --no-cache bash + SHELL ["/bin/bash", "-c"] ENV VIRTUAL_ENV=/opt/venv -RUN python3 -m pip install --user virtualenv -RUN python3 -m venv $VIRTUAL_ENV + +# CMD sleep3600 + +RUN python -m pip install virtualenv +RUN python -m venv $VIRTUAL_ENV ENV PATH="$VIRTUAL_ENV/bin:$PATH" +RUN apk add --no-cache git RUN JAVA_HOME=/usr/lib/jvm/java-1.8.0/ pip install --no-cache-dir --upgrade --force-reinstall git+https://github.com/Beuth-Erdelt/DBMS-Benchmarker@v0.14.6 RUN pip install jupyter diff --git a/images/tpch/loader_postgresql/Dockerfile b/images/tpch/loader_postgresql/Dockerfile index 0b63bb3fb..81c35f21b 100644 --- a/images/tpch/loader_postgresql/Dockerfile +++ b/images/tpch/loader_postgresql/Dockerfile @@ -1,12 +1,17 @@ -FROM debian:stable-20221004-slim +#FROM debian:stable-20221004-slim +FROM alpine:3.15 -RUN apt-get -y update && apt-get clean all +RUN apk update -RUN apt-get install -y build-essential -RUN apt-get install -y wget -RUN wget http://download.redis.io/redis-stable.tar.gz && tar xvzf redis-stable.tar.gz && cd redis-stable && make && cp src/redis-cli /usr/local/bin/ && chmod 755 /usr/local/bin/redis-cli +RUN apk add --no-cache bash -RUN apt-get update && apt-get -y install postgresql-client +RUN apk add --no-cache nano + +RUN apk add --no-cache make cmake gcc libc-dev + +RUN cd /tmp; wget http://download.redis.io/redis-stable.tar.gz; tar xvzf redis-stable.tar.gz; cd redis-stable; make; cp src/redis-cli /usr/local/bin/; chmod 755 /usr/local/bin/redis-cli + +RUN apk update && apk --update add postgresql-client ENV NUM_PODS=4 ENV CHILD=1 diff --git a/k8s/deploymenttemplate-DatabaseService.yml b/k8s/deploymenttemplate-DatabaseService.yml new file mode 100644 index 000000000..647f54eb5 --- /dev/null +++ b/k8s/deploymenttemplate-DatabaseService.yml @@ -0,0 +1,209 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: {app: bexhoma, component: sut, configuration: default, experiment: default} + name: bexhoma-storage +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + storageClassName: shared +--- +apiVersion: v1 +kind: Service +metadata: + labels: {app: bexhoma, component: sut, configuration: default, experiment: default} + name: bexhoma-service +spec: + ports: + - {port: 9091, protocol: TCP, name: port-dbms, targetPort: 5432} + - {port: 9300, protocol: TCP, name: port-monitoring, targetPort: 9300} + selector: {app: bexhoma, component: sut, configuration: default, experiment: default} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: {app: bexhoma, component: sut, configuration: default, experiment: default} + name: bexhoma-deployment-postgres +spec: + replicas: 1 + selector: + matchLabels: {app: bexhoma, component: sut, configuration: default, experiment: default} + template: + metadata: + labels: 
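+ # Template summary: the Service above maps port 9091 to PostgreSQL's 5432
+ # (port-dbms) and exposes 9300 (port-monitoring); the pod below pairs the
+ # postgres:16.1 container with a cAdvisor sidecar on 9300 so container
+ # metrics can be scraped.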
{app: bexhoma, component: sut, configuration: default, experiment: default} + spec: + automountServiceAccountToken: false + imagePullSecrets: + - {name: dockerhub} + nodeSelector: + tolerations: + #- key: "nvidia.com/gpu" + # effect: "NoSchedule" + terminationGracePeriodSeconds: 180 + containers: + - name: dbms + image: postgres:16.1 + env: + - name: POSTGRES_HOST_AUTH_METHOD + value: trust + - name: PGDATA + value: /var/lib/postgresql/data/pgdata + # pg_ctl: cannot be run as root + lifecycle: + postStart: + exec: + command: ["/bin/sh", "-c", "echo 'Hello from the postStart handler' > /usr/share/message && cat /usr/share/message"] + preStop: + exec: + #command: ["/bin/sh", "-c", "gosu postgres pg_ctl stop -D /var/lib/postgresql/data -m fast"] + #command: ["/bin/sh", "-c", "gosu postgres pg_ctl stop -m fast"] + #command: ["echo 'PRESTOP' ;", "/bin/sh", "-c", "gosu postgres pg_ctl stop -m smart -t 120"] + #command: ["/bin/sh", "-c", "echo 'Hello from the preStop handler'; gosu postgres pg_ctl stop -m smart -t 120"] + #command: ["gosu postgres", "pg_ctl stop -m smart -t 120"] + #command: ["/bin/sh", "-c"] + #args: ["gosu postgres pg_ctl stop -m smart -t 120"] + #command: ["/bin/sh", "-c", "gosu postgres 'pg_ctl stop -m smart -t 120'"] + #command: ["/bin/sh", "-c", "gosu postgres '/usr/lib/postgresql/16/bin/pg_ctl stop -m smart -t 120'"] + #command: ["/bin/sh -c 'gosu postgres /usr/lib/postgresql/16/bin/pg_ctl stop -m smart -t 120'"] + command: ["/bin/sh", "-c", "gosu postgres /usr/lib/postgresql/16/bin/pg_ctl stop -m smart -t 120"] + #command: ["/bin/sh", "-c", "echo 'Hello from the preStop handler' > /usr/share/message && cat /usr/share/message"] + #command: ["sh", "-c", "trap 'gosu postgres pg_ctl stop -m smart' SIGTERM; gosu postgres postgres"] # trap: SIGTERM: bad trap + readinessProbe: + exec: + command: + - /bin/sh + - "-c" + - > + if pg_isready -h localhost -p 5432 > /dev/null; then + recovery_status=$(psql -h localhost -p 5432 -U postgres -t -c "SELECT pg_is_in_recovery();" | xargs); + [ "$recovery_status" = "f" ] && exit 0 || exit 1; + else + exit 1; + fi + #command: + #- pg_isready + #- -U + #- postgres + initialDelaySeconds: 15 + periodSeconds: 60 + successThreshold: 3 + ports: + - {containerPort: 5432} + securityContext: + allowPrivilegeEscalation: false + #runAsNonRoot: true + #runAsUser: 1000 + #runAsGroup: 1000 + #capabilities: + # drop: + # - ALL + #readOnlyRootFilesystem: true #could not create lock file "/var/run/postgresql/.s.PGSQL.5432.lock": Read-only file system + resources: + limits: {cpu: 16000m, memory: 128Gi} + requests: {cpu: 1000m, memory: 1Gi} + #, ephemeral-storage: "1536Gi"} + volumeMounts: + - {mountPath: /data, name: benchmark-data-volume} + - {mountPath: /dev/shm, name: dshm} + - {mountPath: /var/lib/postgresql/data, name: benchmark-storage-volume} + args: [ + "-c", "max_worker_processes=64", + "-c", "max_parallel_workers=64", + "-c", "max_parallel_workers_per_gather=64", + "-c", "max_parallel_maintenance_workers=64", # only for PostgreSQL > 10 (?) 
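+            # The settings below favor bulk-load and benchmark speed over
+            # crash-safety (wal_level=minimal, synchronous_commit=off,
+            # autovacuum=off); reasonable for benchmarking, not for production.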
+ "-c", "max_wal_size=32GB", + "-c", "shared_buffers=64GB", + #"-c", "shared_memory_size=32GB", # read-only + "-c", "max_connections=2048", + "-c", "autovacuum_max_workers=10", + "-c", "autovacuum_vacuum_cost_limit=3000", + "-c", "vacuum_cost_limit=1000", + "-c", "checkpoint_completion_target=0.9", + "-c", "cpu_tuple_cost=0.03", + "-c", "effective_cache_size=64GB", + "-c", "maintenance_work_mem=2GB", + #"-c", "max_connections=1700", + #"-c", "random_page_cost=1.1", + "-c", "wal_buffers=1GB", + "-c", "work_mem=32GB", + #"-c", "huge_pages=on", + "-c", "temp_buffers=4GB", + "-c", "autovacuum_work_mem=-1", + "-c", "max_stack_depth=7MB", + "-c", "max_files_per_process=4000", + "-c", "effective_io_concurrency=32", + "-c", "wal_level=minimal", + "-c", "max_wal_senders=0", + "-c", "synchronous_commit=off", + "-c", "checkpoint_timeout=1h", + "-c", "checkpoint_warning=0", + "-c", "autovacuum=off", + "-c", "max_locks_per_transaction=64", + "-c", "max_pred_locks_per_transaction=64", + "-c", "default_statistics_target=1000", + "-c", "random_page_cost=60" + ] + # , "-c", "listen_addresses='*'", "-c", "logging_collector=on" + # , "-c", "pg_stat_statements.save=off", "-c", "pg_stat_statements.track=all", "-c", "shared_preload_libraries='pg_stat_statements'" + - name: cadvisor + image: gcr.io/cadvisor/cadvisor:v0.47.0 + args: ["--port", "9300", "--storage_duration", "20m0s", "--docker_only", "true", "--disable_metrics", "disk,network,tcp,advtcp,udp,sched,process,hugetlb", "--application_metrics_count_limit", "30", "--housekeeping_interval", "5s"] + ports: + - containerPort: 9300 + #hostPort: 9300 + name: http + protocol: TCP + securityContext: + allowPrivilegeEscalation: false + #runAsNonRoot: true + #runAsUser: 1000 + #runAsGroup: 1000 + #capabilities: + # drop: + # - ALL + readOnlyRootFilesystem: true + resources: + requests: {cpu: 150m, memory: 200Mi} + limits: {cpu: 16000m, memory: 128Gi} + volumeMounts: + - name: rootfs + mountPath: /rootfs + readOnly: true + - name: var-run + mountPath: /var/run + readOnly: true + - name: sys + mountPath: /sys + readOnly: true + - name: docker + mountPath: /var/lib/docker + readOnly: true + - name: disk + mountPath: /dev/disk + readOnly: true + volumes: + - name: benchmark-data-volume + persistentVolumeClaim: {claimName: bexhoma-data} + - name: benchmark-storage-volume + persistentVolumeClaim: {claimName: bexhoma-storage} + - name: rootfs + hostPath: + path: / + - name: var-run + hostPath: + path: /var/run + - name: sys + hostPath: + path: /sys + - name: docker + hostPath: + path: /var/lib/docker + - name: disk + hostPath: + path: /dev/disk + - name: dshm + emptyDir: + medium: Memory diff --git a/k8s/deploymenttemplate-PostgreSQLService.yml b/k8s/deploymenttemplate-PostgreSQLService.yml new file mode 100644 index 000000000..e515cdc21 --- /dev/null +++ b/k8s/deploymenttemplate-PostgreSQLService.yml @@ -0,0 +1,190 @@ +apiVersion: v1 +kind: Service +metadata: + labels: {app: bexhoma, component: sut, configuration: default, experiment: default} + name: bexhoma-service +spec: + ports: + - {port: 9091, protocol: TCP, name: port-dbms, targetPort: 5432} + - {port: 9300, protocol: TCP, name: port-monitoring, targetPort: 9300} + selector: {app: bexhoma, component: sut, configuration: default, experiment: default} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: {app: bexhoma, component: sut, configuration: default, experiment: default} + name: bexhoma-deployment-postgres +spec: + replicas: 1 + selector: + matchLabels: {app: bexhoma, component: sut, 
configuration: default, experiment: default} + template: + metadata: + labels: {app: bexhoma, component: sut, configuration: default, experiment: default} + spec: + automountServiceAccountToken: false + imagePullSecrets: + - {name: dockerhub} + nodeSelector: + tolerations: + #- key: "nvidia.com/gpu" + # effect: "NoSchedule" + terminationGracePeriodSeconds: 180 + containers: + - name: dbms + image: postgres:16.1 + env: + - name: POSTGRES_HOST_AUTH_METHOD + value: trust + - name: PGDATA + value: /var/lib/postgresql/data/pgdata + # pg_ctl: cannot be run as root + lifecycle: + postStart: + exec: + command: ["/bin/sh", "-c", "echo 'Hello from the postStart handler' > /usr/share/message && cat /usr/share/message"] + preStop: + exec: + #command: ["/bin/sh", "-c", "gosu postgres pg_ctl stop -D /var/lib/postgresql/data -m fast"] + #command: ["/bin/sh", "-c", "gosu postgres pg_ctl stop -m fast"] + #command: ["echo 'PRESTOP' ;", "/bin/sh", "-c", "gosu postgres pg_ctl stop -m smart -t 120"] + #command: ["/bin/sh", "-c", "echo 'Hello from the preStop handler'; gosu postgres pg_ctl stop -m smart -t 120"] + #command: ["gosu postgres", "pg_ctl stop -m smart -t 120"] + #command: ["/bin/sh", "-c"] + #args: ["gosu postgres pg_ctl stop -m smart -t 120"] + #command: ["/bin/sh", "-c", "gosu postgres 'pg_ctl stop -m smart -t 120'"] + #command: ["/bin/sh", "-c", "gosu postgres '/usr/lib/postgresql/16/bin/pg_ctl stop -m smart -t 120'"] + #command: ["/bin/sh -c 'gosu postgres /usr/lib/postgresql/16/bin/pg_ctl stop -m smart -t 120'"] + command: ["/bin/sh", "-c", "gosu postgres /usr/lib/postgresql/16/bin/pg_ctl stop -m smart -t 120"] + #command: ["/bin/sh", "-c", "echo 'Hello from the preStop handler' > /usr/share/message && cat /usr/share/message"] + #command: ["sh", "-c", "trap 'gosu postgres pg_ctl stop -m smart' SIGTERM; gosu postgres postgres"] # trap: SIGTERM: bad trap + readinessProbe: + exec: + command: + - /bin/sh + - "-c" + - > + if pg_isready -h localhost -p 5432 > /dev/null; then + recovery_status=$(psql -h localhost -p 5432 -U postgres -t -c "SELECT pg_is_in_recovery();" | xargs); + [ "$recovery_status" = "f" ] && exit 0 || exit 1; + else + exit 1; + fi + #command: + #- pg_isready + #- -U + #- postgres + initialDelaySeconds: 15 + periodSeconds: 60 + successThreshold: 3 + ports: + - {containerPort: 5432} + securityContext: + allowPrivilegeEscalation: false + #runAsNonRoot: true + #runAsUser: 1000 + #runAsGroup: 1000 + #capabilities: + # drop: + # - ALL + #readOnlyRootFilesystem: true #could not create lock file "/var/run/postgresql/.s.PGSQL.5432.lock": Read-only file system + resources: + limits: {cpu: 16000m, memory: 128Gi} + requests: {cpu: 1000m, memory: 1Gi} + #, ephemeral-storage: "1536Gi"} + volumeMounts: + - {mountPath: /dev/shm, name: dshm} + args: [ + "-c", "max_worker_processes=64", + "-c", "max_parallel_workers=64", + "-c", "max_parallel_workers_per_gather=64", + "-c", "max_parallel_maintenance_workers=64", # only for PostgreSQL > 10 (?) 
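+            # Same tuning as deploymenttemplate-DatabaseService.yml; note that
+            # this variant mounts no data PVC, so table data lives in the
+            # container's ephemeral storage.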
+ "-c", "max_wal_size=32GB", + "-c", "shared_buffers=64GB", + #"-c", "shared_memory_size=32GB", # read-only + "-c", "max_connections=2048", + "-c", "autovacuum_max_workers=10", + "-c", "autovacuum_vacuum_cost_limit=3000", + "-c", "vacuum_cost_limit=1000", + "-c", "checkpoint_completion_target=0.9", + "-c", "cpu_tuple_cost=0.03", + "-c", "effective_cache_size=64GB", + "-c", "maintenance_work_mem=2GB", + #"-c", "max_connections=1700", + #"-c", "random_page_cost=1.1", + "-c", "wal_buffers=1GB", + "-c", "work_mem=32GB", + #"-c", "huge_pages=on", + "-c", "temp_buffers=4GB", + "-c", "autovacuum_work_mem=-1", + "-c", "max_stack_depth=7MB", + "-c", "max_files_per_process=4000", + "-c", "effective_io_concurrency=32", + "-c", "wal_level=minimal", + "-c", "max_wal_senders=0", + "-c", "synchronous_commit=off", + "-c", "checkpoint_timeout=1h", + "-c", "checkpoint_warning=0", + "-c", "autovacuum=off", + "-c", "max_locks_per_transaction=64", + "-c", "max_pred_locks_per_transaction=64", + "-c", "default_statistics_target=1000", + "-c", "random_page_cost=60" + ] + # , "-c", "listen_addresses='*'", "-c", "logging_collector=on" + # , "-c", "pg_stat_statements.save=off", "-c", "pg_stat_statements.track=all", "-c", "shared_preload_libraries='pg_stat_statements'" + - name: cadvisor + image: gcr.io/cadvisor/cadvisor:v0.47.0 + args: ["--port", "9300", "--storage_duration", "20m0s", "--docker_only", "true", "--disable_metrics", "disk,network,tcp,advtcp,udp,sched,process,hugetlb", "--application_metrics_count_limit", "30", "--housekeeping_interval", "5s"] + ports: + - containerPort: 9300 + #hostPort: 9300 + name: http + protocol: TCP + securityContext: + allowPrivilegeEscalation: false + #runAsNonRoot: true + #runAsUser: 1000 + #runAsGroup: 1000 + #capabilities: + # drop: + # - ALL + readOnlyRootFilesystem: true + resources: + requests: {cpu: 150m, memory: 200Mi} + limits: {cpu: 16000m, memory: 128Gi} + volumeMounts: + - name: rootfs + mountPath: /rootfs + readOnly: true + - name: var-run + mountPath: /var/run + readOnly: true + - name: sys + mountPath: /sys + readOnly: true + - name: docker + mountPath: /var/lib/docker + readOnly: true + - name: disk + mountPath: /dev/disk + readOnly: true + volumes: + - name: rootfs + hostPath: + path: / + - name: var-run + hostPath: + path: /var/run + - name: sys + hostPath: + path: /sys + - name: docker + hostPath: + path: /var/lib/docker + - name: disk + hostPath: + path: /dev/disk + - name: dshm + emptyDir: + medium: Memory diff --git a/logs_tests/doc_benchbase_cockroachdb_1.log b/logs_tests/doc_benchbase_cockroachdb_1.log index c280a7906..261b43f52 100644 --- a/logs_tests/doc_benchbase_cockroachdb_1.log +++ b/logs_tests/doc_benchbase_cockroachdb_1.log @@ -2,8 +2,8 @@ Data Directory : is running Result Directory : is running Dashboard : is running Message Queue : is running -Experiment : has code 1730373213 -Experiment : starts at 2024-10-31 12:13:32.687550 (58014.094684341) +Experiment : has code 1734646253 +Experiment : starts at 2024-12-19 23:10:53.358305 (4331054.765439166) Experiment : This experiment compares run time and resource consumption of Benchbase queries in different DBMS. Benchbase data is generated and loaded using several threads. Benchmark is 'tpcc'. Scaling factor (e.g., number of warehouses) is 16. Benchmarking runs for 5 minutes. Target is based on multiples of '1024'. Factors for benchmarking are [16]. @@ -17,26 +17,28 @@ Benchmarking is tested with [16] threads, split into [1, 2] pods. Benchmarking is run as [1] times the number of benchmarking pods. 
Experiment is run once. Cluster monitoring : is running -Warning: spec.template.spec.containers[1].env[4]: hides previous definition of "BEXHOMA_WORKER_LIST" -Warning: spec.template.spec.containers[0].env[2]: hides previous definition of "BEXHOMA_WORKER_FIRST" +Warning: spec.template.spec.containers[1].env[4]: hides previous definition of "BEXHOMA_WORKER_LIST", which may be dropped when using apply +Warning: spec.template.spec.containers[0].env[2]: hides previous definition of "BEXHOMA_WORKER_FIRST", which may be dropped when using apply done CockroachDB-1-1-1024 : will start now - waiting 30s - : done CockroachDB-1-1-1024 : is not loaded yet -CockroachDB-1-1-1024 : will start loading but not before 2024-10-31 11:16:33 (that is in 120 secs) +CockroachDB-1-1-1024 : will start loading but not before 2024-12-19 22:13:54 (that is in 120 secs) - waiting 30s - : done CockroachDB-1-1-1024 : is not loaded yet -CockroachDB-1-1-1024 : will start loading but not before 2024-10-31 11:16:33 +CockroachDB-1-1-1024 : will start loading but not before 2024-12-19 22:13:54 - waiting 30s - : done CockroachDB-1-1-1024 : is not loaded yet -CockroachDB-1-1-1024 : will start loading but not before 2024-10-31 11:16:33 +CockroachDB-1-1-1024 : will start loading but not before 2024-12-19 22:13:54 - waiting 30s - : done CockroachDB-1-1-1024 : is not loaded yet -CockroachDB-1-1-1024 : will start loading but not before 2024-10-31 11:16:33 +CockroachDB-1-1-1024 : will start loading but not before 2024-12-19 22:13:54 - waiting 30s - : done CockroachDB-1-1-1024 : is not loaded yet done -CockroachDB-1-1-1024 Workers 1 of 1 +Worker pods found: ['bexhoma-worker-cockroachdb-1-1-1024-1734646253-0', 'bexhoma-worker-cockroachdb-1-1-1024-1734646253-1', 'bexhoma-worker-cockroachdb-1-1-1024-1734646253-2'] +CockroachDB-1-1-1024 Workers 3 of 3 +Worker pods found: ['bexhoma-worker-cockroachdb-1-1-1024-1734646253-0', 'bexhoma-worker-cockroachdb-1-1-1024-1734646253-1', 'bexhoma-worker-cockroachdb-1-1-1024-1734646253-2'] CockroachDB-1-1-1024 : start asynch loading scripts of type loaded CockroachDB-1-1-1024 : is loading - waiting 30s - : done @@ -53,17 +55,21 @@ CockroachDB-1-1-1024 : is loading CockroachDB-1-1-1024 : is loading - waiting 30s - : done CockroachDB-1-1-1024 : is loading -done +- waiting 30s - : done CockroachDB-1-1-1024 : is loading done CockroachDB-1-1-1024 : showing loader times -CockroachDB-1-1-1024 : loader timespan (first to last [s]) = 236 +CockroachDB-1-1-1024 : loader timespan (first to last [s]) = 267 CockroachDB-1-1-1024 : benchmarks done 0 of 1. 
This will be client 1 CockroachDB-1-1-1024 : we will change parameters of benchmark as {'PARALLEL': '1', 'SF': '16', 'BENCHBASE_BENCH': 'tpcc', 'BENCHBASE_PROFILE': 'cockroachdb', 'BEXHOMA_DATABASE': 'defaultdb', 'BENCHBASE_TARGET': 16384, 'BENCHBASE_TERMINALS': 16, 'BENCHBASE_TIME': 300, 'BENCHBASE_ISOLATION': 'TRANSACTION_READ_COMMITTED', 'BEXHOMA_USER': 'root', 'BEXHOMA_PASSWORD': ''} CockroachDB-1-1-1024-1 : start benchmarking -CockroachDB-1-1-1024 : benchmarking results in folder /home/perdelt/benchmarks/1730373213 +Worker pods found: ['bexhoma-worker-cockroachdb-1-1-1024-1734646253-0', 'bexhoma-worker-cockroachdb-1-1-1024-1734646253-1', 'bexhoma-worker-cockroachdb-1-1-1024-1734646253-2'] +CockroachDB-1-1-1024 : distributed system - get host info for worker bexhoma-worker-cockroachdb-1-1-1024-1734646253-0 +CockroachDB-1-1-1024 : distributed system - get host info for worker bexhoma-worker-cockroachdb-1-1-1024-1734646253-1 +CockroachDB-1-1-1024 : distributed system - get host info for worker bexhoma-worker-cockroachdb-1-1-1024-1734646253-2 +CockroachDB-1-1-1024 : benchmarking results in folder /home/perdelt/benchmarks/1734646253 - waiting 10s - : done -CockroachDB-1-1-1024 : benchmarking is waiting for job bexhoma-benchmarker-cockroachdb-1-1-1024-1730373213-1-1-q92s6: found +CockroachDB-1-1-1024 : benchmarking is waiting for job bexhoma-benchmarker-cockroachdb-1-1-1024-1734646253-1-1-vbl52: found - waiting 30s - : done CockroachDB-1-1-1024 : has running benchmarks - waiting 30s - : done @@ -85,16 +91,20 @@ CockroachDB-1-1-1024 : has running benchmarks done CockroachDB-1-1-1024 : has running benchmarks CockroachDB-1-1-1024-1 : showing benchmarker times -CockroachDB-1-1-1024-1 : benchmarker timespan (start to end single container [s]) = 312 -CockroachDB-1-1-1024-1 : benchmarker times (start/end per pod and container) = [(1730373678, 1730373983)] -CockroachDB-1-1-1024-1 : found and updated times {'benchmarker': [(1730373678, 1730373983)]} +CockroachDB-1-1-1024-1 : benchmarker timespan (start to end single container [s]) = 314 +CockroachDB-1-1-1024-1 : benchmarker times (start/end per pod and container) = [(1734646723, 1734647027)] +CockroachDB-1-1-1024-1 : found and updated times {'benchmarker': [(1734646723, 1734647027)]} done CockroachDB-1-1-1024 : benchmarks done 0 of 1. 
This will be client 2 CockroachDB-1-1-1024 : we will change parameters of benchmark as {'PARALLEL': '2', 'SF': '16', 'BENCHBASE_BENCH': 'tpcc', 'BENCHBASE_PROFILE': 'cockroachdb', 'BEXHOMA_DATABASE': 'defaultdb', 'BENCHBASE_TARGET': 8192, 'BENCHBASE_TERMINALS': 8, 'BENCHBASE_TIME': 300, 'BENCHBASE_ISOLATION': 'TRANSACTION_READ_COMMITTED', 'BEXHOMA_USER': 'root', 'BEXHOMA_PASSWORD': ''} CockroachDB-1-1-1024-2 : start benchmarking -CockroachDB-1-1-1024 : benchmarking results in folder /home/perdelt/benchmarks/1730373213 +Worker pods found: ['bexhoma-worker-cockroachdb-1-1-1024-1734646253-0', 'bexhoma-worker-cockroachdb-1-1-1024-1734646253-1', 'bexhoma-worker-cockroachdb-1-1-1024-1734646253-2'] +CockroachDB-1-1-1024 : distributed system - get host info for worker bexhoma-worker-cockroachdb-1-1-1024-1734646253-0 +CockroachDB-1-1-1024 : distributed system - get host info for worker bexhoma-worker-cockroachdb-1-1-1024-1734646253-1 +CockroachDB-1-1-1024 : distributed system - get host info for worker bexhoma-worker-cockroachdb-1-1-1024-1734646253-2 +CockroachDB-1-1-1024 : benchmarking results in folder /home/perdelt/benchmarks/1734646253 - waiting 10s - : done -CockroachDB-1-1-1024 : benchmarking is waiting for job bexhoma-benchmarker-cockroachdb-1-1-1024-1730373213-1-2-5458b: found +CockroachDB-1-1-1024 : benchmarking is waiting for job bexhoma-benchmarker-cockroachdb-1-1-1024-1734646253-1-2-2vzvp: found - waiting 30s - : done CockroachDB-1-1-1024 : has running benchmarks - waiting 30s - : done @@ -116,33 +126,25 @@ CockroachDB-1-1-1024 : has running benchmarks done CockroachDB-1-1-1024 : has running benchmarks CockroachDB-1-1-1024-2 : showing benchmarker times -CockroachDB-1-1-1024-2 : benchmarker timespan (start to end single container [s]) = 313 -CockroachDB-1-1-1024-2 : benchmarker times (start/end per pod and container) = [(1730374029, 1730374333), (1730374028, 1730374332)] -CockroachDB-1-1-1024-2 : found and updated times {'benchmarker': [(1730374029, 1730374333), (1730374028, 1730374332)]} +CockroachDB-1-1-1024-2 : benchmarker timespan (start to end single container [s]) = 314 +CockroachDB-1-1-1024-2 : benchmarker times (start/end per pod and container) = [(1734647075, 1734647379), (1734647075, 1734647379)] +CockroachDB-1-1-1024-2 : found and updated times {'benchmarker': [(1734647075, 1734647379), (1734647075, 1734647379)]} done CockroachDB-1-1-1024 : can be stopped -Experiment ends at 2024-10-31 12:32:50.563815 (59171.970949174): 1157.8762648330012s total +Experiment ends at 2024-12-19 23:30:19.076842 (4332220.48397571): 1165.7185365445912s total Experiment : downloading partial results Experiment : uploading full results Experiment : uploading workload file -Benchmarking connection ... Latency Distribution.Average Latency (microseconds) -connection_pod ... -CockroachDB-1-1-1024-2-1 CockroachDB-1-1-1024-2 ... 25042 -CockroachDB-1-1-1024-2-2 CockroachDB-1-1-1024-2 ... 25093 -CockroachDB-1-1-1024-1-1 CockroachDB-1-1-1024-1 ... 22911 - -[3 rows x 36 columns] -Workflow {'CockroachDB-1-1-1024': [[2, 1]]} -Result workflow not complete +Result workflow complete ## Show Summary ### Workload Benchbase Workload SF=16 (warehouses for TPC-C) Type: benchbase - Duration: 1158s - Code: 1730373213 - This includes no queries. Benchbase runs the benchmark + Duration: 1166s + Code: 1734646253 + Benchbase runs the benchmark. This experiment compares run time and resource consumption of Benchbase queries in different DBMS. Benchbase data is generated and loaded using several threads. Benchmark is 'tpcc'. 
Scaling factor (e.g., number of warehouses) is 16. Benchmarking runs for 5 minutes. Target is based on multiples of '1024'. Factors for benchmarking are [16]. @@ -158,41 +160,95 @@ Benchbase Workload SF=16 (warehouses for TPC-C) ### Connections CockroachDB-1-1-1024-1 uses docker image cockroachdb/cockroach:v24.2.4 - RAM:541008605184 + RAM:541008576512 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254912048 + disk:249215596 requests_cpu:4 requests_memory:16Gi + worker 0 + RAM:1081966526464 + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker28 + disk:461657896 + datadisk:116314488 + volume_size:1000G + volume_used:109G + worker 1 + RAM:1081965555712 + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:635102812 + datadisk:116104180 + volume_size:1000G + volume_used:109G + worker 2 + RAM:540587499520 + Cores:128 + host:5.15.0-126-generic + node:cl-worker22 + disk:123840188 + datadisk:116091372 + volume_size:1000G + volume_used:109G CockroachDB-1-1-1024-2 uses docker image cockroachdb/cockroach:v24.2.4 - RAM:541008605184 + RAM:541008576512 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254912048 + disk:249215600 requests_cpu:4 requests_memory:16Gi + worker 0 + RAM:1081966526464 + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker28 + disk:461867536 + datadisk:116522308 + volume_size:1000G + volume_used:109G + worker 1 + RAM:1081965555712 + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:635488320 + datadisk:116308436 + volume_size:1000G + volume_used:109G + worker 2 + RAM:540587499520 + Cores:128 + host:5.15.0-126-generic + node:cl-worker22 + disk:124062476 + datadisk:116312956 + volume_size:1000G + volume_used:109G ### Execution experiment_run terminals target pod_count time Throughput (requests/second) Latency Distribution.95th Percentile Latency (microseconds) Latency Distribution.Average Latency (microseconds) -CockroachDB-1-1-1024-1 1 16 16384 1 300.0 697.93 55073.0 22911.0 -CockroachDB-1-1-1024-2 1 16 16384 2 300.0 637.91 62856.0 25067.5 +CockroachDB-1-1-1024-1 1 16 16384 1 300.0 312.89 95381.0 51118.0 +CockroachDB-1-1-1024-2 1 16 16384 2 300.0 241.61 142861.0 66206.0 Warehouses: 16 ### Workflow #### Actual -DBMS CockroachDB-1-1-1024 - Pods [[2, 1]] +DBMS CockroachDB-1-1-1024 - Pods [[1, 2]] #### Planned DBMS CockroachDB-1-1-1024 - Pods [[1, 2]] ### Loading time_load terminals pods Imported warehouses [1/h] -CockroachDB-1-1-1024-1 236.0 1.0 1.0 244.067797 -CockroachDB-1-1-1024-2 236.0 1.0 2.0 244.067797 +CockroachDB-1-1-1024-1 267.0 1.0 1.0 215.730337 +CockroachDB-1-1-1024-2 267.0 1.0 2.0 215.730337 ### Tests TEST passed: Throughput (requests/second) contains no 0 or NaN diff --git a/logs_tests/doc_benchbase_cockroachdb_1_summary.txt b/logs_tests/doc_benchbase_cockroachdb_1_summary.txt index 22d4415cd..a85ba7163 100644 --- a/logs_tests/doc_benchbase_cockroachdb_1_summary.txt +++ b/logs_tests/doc_benchbase_cockroachdb_1_summary.txt @@ -3,9 +3,9 @@ ### Workload Benchbase Workload SF=16 (warehouses for TPC-C) Type: benchbase - Duration: 1158s - Code: 1730373213 - This includes no queries. Benchbase runs the benchmark + Duration: 1166s + Code: 1734646253 + Benchbase runs the benchmark. This experiment compares run time and resource consumption of Benchbase queries in different DBMS. Benchbase data is generated and loaded using several threads. Benchmark is 'tpcc'. Scaling factor (e.g., number of warehouses) is 16. Benchmarking runs for 5 minutes. Target is based on multiples of '1024'. 
Factors for benchmarking are [16]. @@ -21,41 +21,95 @@ Benchbase Workload SF=16 (warehouses for TPC-C) ### Connections CockroachDB-1-1-1024-1 uses docker image cockroachdb/cockroach:v24.2.4 - RAM:541008605184 + RAM:541008576512 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254912048 + disk:249215596 requests_cpu:4 requests_memory:16Gi + worker 0 + RAM:1081966526464 + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker28 + disk:461657896 + datadisk:116314488 + volume_size:1000G + volume_used:109G + worker 1 + RAM:1081965555712 + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:635102812 + datadisk:116104180 + volume_size:1000G + volume_used:109G + worker 2 + RAM:540587499520 + Cores:128 + host:5.15.0-126-generic + node:cl-worker22 + disk:123840188 + datadisk:116091372 + volume_size:1000G + volume_used:109G CockroachDB-1-1-1024-2 uses docker image cockroachdb/cockroach:v24.2.4 - RAM:541008605184 + RAM:541008576512 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254912048 + disk:249215600 requests_cpu:4 requests_memory:16Gi + worker 0 + RAM:1081966526464 + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker28 + disk:461867536 + datadisk:116522308 + volume_size:1000G + volume_used:109G + worker 1 + RAM:1081965555712 + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:635488320 + datadisk:116308436 + volume_size:1000G + volume_used:109G + worker 2 + RAM:540587499520 + Cores:128 + host:5.15.0-126-generic + node:cl-worker22 + disk:124062476 + datadisk:116312956 + volume_size:1000G + volume_used:109G ### Execution experiment_run terminals target pod_count time Throughput (requests/second) Latency Distribution.95th Percentile Latency (microseconds) Latency Distribution.Average Latency (microseconds) -CockroachDB-1-1-1024-1 1 16 16384 1 300.0 697.93 55073.0 22911.0 -CockroachDB-1-1-1024-2 1 16 16384 2 300.0 637.91 62856.0 25067.5 +CockroachDB-1-1-1024-1 1 16 16384 1 300.0 312.89 95381.0 51118.0 +CockroachDB-1-1-1024-2 1 16 16384 2 300.0 241.61 142861.0 66206.0 Warehouses: 16 ### Workflow #### Actual -DBMS CockroachDB-1-1-1024 - Pods [[2, 1]] +DBMS CockroachDB-1-1-1024 - Pods [[1, 2]] #### Planned DBMS CockroachDB-1-1-1024 - Pods [[1, 2]] ### Loading time_load terminals pods Imported warehouses [1/h] -CockroachDB-1-1-1024-1 236.0 1.0 1.0 244.067797 -CockroachDB-1-1-1024-2 236.0 1.0 2.0 244.067797 +CockroachDB-1-1-1024-1 267.0 1.0 1.0 215.730337 +CockroachDB-1-1-1024-2 267.0 1.0 2.0 215.730337 ### Tests TEST passed: Throughput (requests/second) contains no 0 or NaN diff --git a/logs_tests/doc_benchbase_cockroachdb_2.log b/logs_tests/doc_benchbase_cockroachdb_2.log index 13f3ae9b0..1cdebe1e7 100644 --- a/logs_tests/doc_benchbase_cockroachdb_2.log +++ b/logs_tests/doc_benchbase_cockroachdb_2.log @@ -2,8 +2,8 @@ Data Directory : is running Result Directory : is running Dashboard : is running Message Queue : is running -Experiment : has code 1730374413 -Experiment : starts at 2024-10-31 12:33:33.078575 (59214.485708551) +Experiment : has code 1734647454 +Experiment : starts at 2024-12-19 23:30:53.703666 (4332255.110801013) Experiment : This experiment compares run time and resource consumption of Benchbase queries in different DBMS. Benchbase data is generated and loaded using several threads. Benchmark is 'tpcc'. Scaling factor (e.g., number of warehouses) is 128. Benchmarking runs for 60 minutes. Target is based on multiples of '1024'. Factors for benchmarking are [16]. 
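The per-client parameter lines in the hunks below split fixed totals across benchmarking pods: BENCHBASE_TARGET and BENCHBASE_TERMINALS halve each time PARALLEL doubles. A minimal sketch of that apparent rule (function name and defaults are illustrative, not bexhoma's API):

# split fixed totals evenly across parallel benchmarking pods
def per_pod_parameters(total_target=16384, total_terminals=64, pod_counts=(1, 2, 4, 8)):
    for pods in pod_counts:
        yield {
            'PARALLEL': pods,
            'BENCHBASE_TARGET': total_target // pods,        # 16384, 8192, 4096, 2048
            'BENCHBASE_TERMINALS': total_terminals // pods,  # 64, 32, 16, 8
        }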
@@ -17,26 +17,28 @@ Benchmarking is tested with [64] threads, split into [1, 2, 4, 8] pods. Benchmarking is run as [1] times the number of benchmarking pods. Experiment is run once. Cluster monitoring : is running -Warning: spec.template.spec.containers[1].env[4]: hides previous definition of "BEXHOMA_WORKER_LIST" -Warning: spec.template.spec.containers[0].env[2]: hides previous definition of "BEXHOMA_WORKER_FIRST" +Warning: spec.template.spec.containers[1].env[4]: hides previous definition of "BEXHOMA_WORKER_LIST", which may be dropped when using apply +Warning: spec.template.spec.containers[0].env[2]: hides previous definition of "BEXHOMA_WORKER_FIRST", which may be dropped when using apply done CockroachDB-1-1-1024 : will start now - waiting 30s - : done CockroachDB-1-1-1024 : is not loaded yet -CockroachDB-1-1-1024 : will start loading but not before 2024-10-31 11:36:33 (that is in 120 secs) +CockroachDB-1-1-1024 : will start loading but not before 2024-12-19 22:33:54 (that is in 120 secs) - waiting 30s - : done CockroachDB-1-1-1024 : is not loaded yet -CockroachDB-1-1-1024 : will start loading but not before 2024-10-31 11:36:33 +CockroachDB-1-1-1024 : will start loading but not before 2024-12-19 22:33:54 - waiting 30s - : done CockroachDB-1-1-1024 : is not loaded yet -CockroachDB-1-1-1024 : will start loading but not before 2024-10-31 11:36:33 +CockroachDB-1-1-1024 : will start loading but not before 2024-12-19 22:33:54 - waiting 30s - : done CockroachDB-1-1-1024 : is not loaded yet -CockroachDB-1-1-1024 : will start loading but not before 2024-10-31 11:36:33 +CockroachDB-1-1-1024 : will start loading but not before 2024-12-19 22:33:54 - waiting 30s - : done CockroachDB-1-1-1024 : is not loaded yet done -CockroachDB-1-1-1024 Workers 1 of 1 +Worker pods found: ['bexhoma-worker-cockroachdb-1-1-1024-1734647454-0', 'bexhoma-worker-cockroachdb-1-1-1024-1734647454-1', 'bexhoma-worker-cockroachdb-1-1-1024-1734647454-2'] +CockroachDB-1-1-1024 Workers 3 of 3 +Worker pods found: ['bexhoma-worker-cockroachdb-1-1-1024-1734647454-0', 'bexhoma-worker-cockroachdb-1-1-1024-1734647454-1', 'bexhoma-worker-cockroachdb-1-1-1024-1734647454-2'] CockroachDB-1-1-1024 : start asynch loading scripts of type loaded CockroachDB-1-1-1024 : is loading - waiting 30s - : done @@ -73,15 +75,53 @@ CockroachDB-1-1-1024 : is loading CockroachDB-1-1-1024 : is loading - waiting 30s - : done CockroachDB-1-1-1024 : is loading +- waiting 30s - : done +CockroachDB-1-1-1024 : is loading +- waiting 30s - : done +CockroachDB-1-1-1024 : is loading +- waiting 30s - : done +CockroachDB-1-1-1024 : is loading +- waiting 30s - : done +CockroachDB-1-1-1024 : is loading +- waiting 30s - : done +CockroachDB-1-1-1024 : is loading +- waiting 30s - : done +CockroachDB-1-1-1024 : is loading +- waiting 30s - : done +CockroachDB-1-1-1024 : is loading +- waiting 30s - : done +CockroachDB-1-1-1024 : is loading +- waiting 30s - : done +CockroachDB-1-1-1024 : is loading +- waiting 30s - : done +CockroachDB-1-1-1024 : is loading +- waiting 30s - : done +CockroachDB-1-1-1024 : is loading +- waiting 30s - : done +CockroachDB-1-1-1024 : is loading +- waiting 30s - : done +CockroachDB-1-1-1024 : is loading +- waiting 30s - : done +CockroachDB-1-1-1024 : is loading +- waiting 30s - : done +CockroachDB-1-1-1024 : is loading +- waiting 30s - : done +CockroachDB-1-1-1024 : is loading +- waiting 30s - : done +CockroachDB-1-1-1024 : is loading done CockroachDB-1-1-1024 : showing loader times -CockroachDB-1-1-1024 : loader timespan (first to last [s]) = 531 
+CockroachDB-1-1-1024 : loader timespan (first to last [s]) = 1036 CockroachDB-1-1-1024 : benchmarks done 0 of 1. This will be client 1 CockroachDB-1-1-1024 : we will change parameters of benchmark as {'PARALLEL': '1', 'SF': '128', 'BENCHBASE_BENCH': 'tpcc', 'BENCHBASE_PROFILE': 'cockroachdb', 'BEXHOMA_DATABASE': 'defaultdb', 'BENCHBASE_TARGET': 16384, 'BENCHBASE_TERMINALS': 64, 'BENCHBASE_TIME': 3600, 'BENCHBASE_ISOLATION': 'TRANSACTION_READ_COMMITTED', 'BEXHOMA_USER': 'root', 'BEXHOMA_PASSWORD': ''} CockroachDB-1-1-1024-1 : start benchmarking -CockroachDB-1-1-1024 : benchmarking results in folder /home/perdelt/benchmarks/1730374413 +Worker pods found: ['bexhoma-worker-cockroachdb-1-1-1024-1734647454-0', 'bexhoma-worker-cockroachdb-1-1-1024-1734647454-1', 'bexhoma-worker-cockroachdb-1-1-1024-1734647454-2'] +CockroachDB-1-1-1024 : distributed system - get host info for worker bexhoma-worker-cockroachdb-1-1-1024-1734647454-0 +CockroachDB-1-1-1024 : distributed system - get host info for worker bexhoma-worker-cockroachdb-1-1-1024-1734647454-1 +CockroachDB-1-1-1024 : distributed system - get host info for worker bexhoma-worker-cockroachdb-1-1-1024-1734647454-2 +CockroachDB-1-1-1024 : benchmarking results in folder /home/perdelt/benchmarks/1734647454 - waiting 10s - : done -CockroachDB-1-1-1024 : benchmarking is waiting for job bexhoma-benchmarker-cockroachdb-1-1-1024-1730374413-1-1-pzq2d: found +CockroachDB-1-1-1024 : benchmarking is waiting for job bexhoma-benchmarker-cockroachdb-1-1-1024-1734647454-1-1-lbt66: found - waiting 30s - : done CockroachDB-1-1-1024 : has running benchmarks - waiting 30s - : done @@ -323,16 +363,20 @@ CockroachDB-1-1-1024 : has running benchmarks done CockroachDB-1-1-1024 : has running benchmarks CockroachDB-1-1-1024-1 : showing benchmarker times -CockroachDB-1-1-1024-1 : benchmarker timespan (start to end single container [s]) = 3625 -CockroachDB-1-1-1024-1 : benchmarker times (start/end per pod and container) = [(1730375150, 1730378765)] -CockroachDB-1-1-1024-1 : found and updated times {'benchmarker': [(1730375150, 1730378765)]} +CockroachDB-1-1-1024-1 : benchmarker timespan (start to end single container [s]) = 3637 +CockroachDB-1-1-1024-1 : benchmarker times (start/end per pod and container) = [(1734648709, 1734652333)] +CockroachDB-1-1-1024-1 : found and updated times {'benchmarker': [(1734648709, 1734652333)]} done CockroachDB-1-1-1024 : benchmarks done 0 of 1. 
This will be client 2 CockroachDB-1-1-1024 : we will change parameters of benchmark as {'PARALLEL': '2', 'SF': '128', 'BENCHBASE_BENCH': 'tpcc', 'BENCHBASE_PROFILE': 'cockroachdb', 'BEXHOMA_DATABASE': 'defaultdb', 'BENCHBASE_TARGET': 8192, 'BENCHBASE_TERMINALS': 32, 'BENCHBASE_TIME': 3600, 'BENCHBASE_ISOLATION': 'TRANSACTION_READ_COMMITTED', 'BEXHOMA_USER': 'root', 'BEXHOMA_PASSWORD': ''} CockroachDB-1-1-1024-2 : start benchmarking -CockroachDB-1-1-1024 : benchmarking results in folder /home/perdelt/benchmarks/1730374413 +Worker pods found: ['bexhoma-worker-cockroachdb-1-1-1024-1734647454-0', 'bexhoma-worker-cockroachdb-1-1-1024-1734647454-1', 'bexhoma-worker-cockroachdb-1-1-1024-1734647454-2'] +CockroachDB-1-1-1024 : distributed system - get host info for worker bexhoma-worker-cockroachdb-1-1-1024-1734647454-0 +CockroachDB-1-1-1024 : distributed system - get host info for worker bexhoma-worker-cockroachdb-1-1-1024-1734647454-1 +CockroachDB-1-1-1024 : distributed system - get host info for worker bexhoma-worker-cockroachdb-1-1-1024-1734647454-2 +CockroachDB-1-1-1024 : benchmarking results in folder /home/perdelt/benchmarks/1734647454 - waiting 10s - : done -CockroachDB-1-1-1024 : benchmarking is waiting for job bexhoma-benchmarker-cockroachdb-1-1-1024-1730374413-1-2-h8bjm: found +CockroachDB-1-1-1024 : benchmarking is waiting for job bexhoma-benchmarker-cockroachdb-1-1-1024-1734647454-1-2-tsf9s: found - waiting 30s - : done CockroachDB-1-1-1024 : has running benchmarks - waiting 30s - : done @@ -574,18 +618,20 @@ CockroachDB-1-1-1024 : has running benchmarks done CockroachDB-1-1-1024 : has running benchmarks CockroachDB-1-1-1024-2 : showing benchmarker times -CockroachDB-1-1-1024-2 : benchmarker timespan (start to end single container [s]) = 3627 -CockroachDB-1-1-1024-2 : benchmarker times (start/end per pod and container) = [(1730378811, 1730382421), (1730378811, 1730382421)] -CockroachDB-1-1-1024-2 : found and updated times {'benchmarker': [(1730378811, 1730382421), (1730378811, 1730382421)]} +CockroachDB-1-1-1024-2 : benchmarker timespan (start to end single container [s]) = 3642 +CockroachDB-1-1-1024-2 : benchmarker times (start/end per pod and container) = [(1734652384, 1734655998), (1734652384, 1734655998)] +CockroachDB-1-1-1024-2 : found and updated times {'benchmarker': [(1734652384, 1734655998), (1734652384, 1734655998)]} done CockroachDB-1-1-1024 : benchmarks done 0 of 1. 
This will be client 3 CockroachDB-1-1-1024 : we will change parameters of benchmark as {'PARALLEL': '4', 'SF': '128', 'BENCHBASE_BENCH': 'tpcc', 'BENCHBASE_PROFILE': 'cockroachdb', 'BEXHOMA_DATABASE': 'defaultdb', 'BENCHBASE_TARGET': 4096, 'BENCHBASE_TERMINALS': 16, 'BENCHBASE_TIME': 3600, 'BENCHBASE_ISOLATION': 'TRANSACTION_READ_COMMITTED', 'BEXHOMA_USER': 'root', 'BEXHOMA_PASSWORD': ''} CockroachDB-1-1-1024-3 : start benchmarking -CockroachDB-1-1-1024 : benchmarking results in folder /home/perdelt/benchmarks/1730374413 +Worker pods found: ['bexhoma-worker-cockroachdb-1-1-1024-1734647454-0', 'bexhoma-worker-cockroachdb-1-1-1024-1734647454-1', 'bexhoma-worker-cockroachdb-1-1-1024-1734647454-2'] +CockroachDB-1-1-1024 : distributed system - get host info for worker bexhoma-worker-cockroachdb-1-1-1024-1734647454-0 +CockroachDB-1-1-1024 : distributed system - get host info for worker bexhoma-worker-cockroachdb-1-1-1024-1734647454-1 +CockroachDB-1-1-1024 : distributed system - get host info for worker bexhoma-worker-cockroachdb-1-1-1024-1734647454-2 +CockroachDB-1-1-1024 : benchmarking results in folder /home/perdelt/benchmarks/1734647454 - waiting 10s - : done -CockroachDB-1-1-1024 : benchmarking is waiting for job bexhoma-benchmarker-cockroachdb-1-1-1024-1730374413-1-3-c4rm7: found -- waiting 30s - : done -CockroachDB-1-1-1024 : has running benchmarks +CockroachDB-1-1-1024 : benchmarking is waiting for job bexhoma-benchmarker-cockroachdb-1-1-1024-1734647454-1-3-l8x4n: found - waiting 30s - : done CockroachDB-1-1-1024 : has running benchmarks - waiting 30s - : done @@ -825,18 +871,20 @@ CockroachDB-1-1-1024 : has running benchmarks done CockroachDB-1-1-1024 : has running benchmarks CockroachDB-1-1-1024-3 : showing benchmarker times -CockroachDB-1-1-1024-3 : benchmarker timespan (start to end single container [s]) = 3634 -CockroachDB-1-1-1024-3 : benchmarker times (start/end per pod and container) = [(1730382478, 1730386086), (1730382478, 1730386086), (1730382478, 1730386085), (1730382479, 1730386086)] -CockroachDB-1-1-1024-3 : found and updated times {'benchmarker': [(1730382478, 1730386086), (1730382478, 1730386086), (1730382478, 1730386085), (1730382479, 1730386086)]} +CockroachDB-1-1-1024-3 : benchmarker timespan (start to end single container [s]) = 3625 +CockroachDB-1-1-1024-3 : benchmarker times (start/end per pod and container) = [(1734656067, 1734659676), (1734656066, 1734659676), (1734656067, 1734659677), (1734656066, 1734659676)] +CockroachDB-1-1-1024-3 : found and updated times {'benchmarker': [(1734656067, 1734659676), (1734656066, 1734659676), (1734656067, 1734659677), (1734656066, 1734659676)]} done CockroachDB-1-1-1024 : benchmarks done 0 of 1. 
This will be client 4 CockroachDB-1-1-1024 : we will change parameters of benchmark as {'PARALLEL': '8', 'SF': '128', 'BENCHBASE_BENCH': 'tpcc', 'BENCHBASE_PROFILE': 'cockroachdb', 'BEXHOMA_DATABASE': 'defaultdb', 'BENCHBASE_TARGET': 2048, 'BENCHBASE_TERMINALS': 8, 'BENCHBASE_TIME': 3600, 'BENCHBASE_ISOLATION': 'TRANSACTION_READ_COMMITTED', 'BEXHOMA_USER': 'root', 'BEXHOMA_PASSWORD': ''} CockroachDB-1-1-1024-4 : start benchmarking -CockroachDB-1-1-1024 : benchmarking results in folder /home/perdelt/benchmarks/1730374413 +Worker pods found: ['bexhoma-worker-cockroachdb-1-1-1024-1734647454-0', 'bexhoma-worker-cockroachdb-1-1-1024-1734647454-1', 'bexhoma-worker-cockroachdb-1-1-1024-1734647454-2'] +CockroachDB-1-1-1024 : distributed system - get host info for worker bexhoma-worker-cockroachdb-1-1-1024-1734647454-0 +CockroachDB-1-1-1024 : distributed system - get host info for worker bexhoma-worker-cockroachdb-1-1-1024-1734647454-1 +CockroachDB-1-1-1024 : distributed system - get host info for worker bexhoma-worker-cockroachdb-1-1-1024-1734647454-2 +CockroachDB-1-1-1024 : benchmarking results in folder /home/perdelt/benchmarks/1734647454 - waiting 10s - : done -CockroachDB-1-1-1024 : benchmarking is waiting for job bexhoma-benchmarker-cockroachdb-1-1-1024-1730374413-1-4-6n5f2: found -- waiting 30s - : done -CockroachDB-1-1-1024 : has running benchmarks +CockroachDB-1-1-1024 : benchmarking is waiting for job bexhoma-benchmarker-cockroachdb-1-1-1024-1734647454-1-4-8n254: found - waiting 30s - : done CockroachDB-1-1-1024 : has running benchmarks - waiting 30s - : done @@ -1076,35 +1124,15 @@ CockroachDB-1-1-1024 : has running benchmarks done CockroachDB-1-1-1024 : has running benchmarks CockroachDB-1-1-1024-4 : showing benchmarker times -CockroachDB-1-1-1024-4 : benchmarker timespan (start to end single container [s]) = 3653 -CockroachDB-1-1-1024-4 : benchmarker times (start/end per pod and container) = [(1730386153, 1730389760), (1730386154, 1730389760), (1730386153, 1730389760), (1730386153, 1730389760), (1730386153, 1730389760), (1730386154, 1730389760), (1730386153, 1730389760), (1730386153, 1730389760)] -CockroachDB-1-1-1024-4 : found and updated times {'benchmarker': [(1730386153, 1730389760), (1730386154, 1730389760), (1730386153, 1730389760), (1730386153, 1730389760), (1730386153, 1730389760), (1730386154, 1730389760), (1730386153, 1730389760), (1730386153, 1730389760)]} +CockroachDB-1-1-1024-4 : benchmarker timespan (start to end single container [s]) = 3652 +CockroachDB-1-1-1024-4 : benchmarker times (start/end per pod and container) = [(1734659733, 1734663341), (1734659732, 1734663341), (1734659732, 1734663341), (1734659733, 1734663341), (1734659732, 1734663340), (1734659733, 1734663340), (1734659732, 1734663340), (1734659733, 1734663341)] +CockroachDB-1-1-1024-4 : found and updated times {'benchmarker': [(1734659733, 1734663341), (1734659732, 1734663341), (1734659732, 1734663341), (1734659733, 1734663341), (1734659732, 1734663340), (1734659733, 1734663340), (1734659732, 1734663340), (1734659733, 1734663341)]} done CockroachDB-1-1-1024 : can be stopped -Experiment ends at 2024-10-31 16:50:30.565068 (74631.97220079): 15417.486492238997s total +Experiment ends at 2024-12-20 03:56:50.447696 (4348211.854830527): 15956.74402951356s total Experiment : downloading partial results Experiment : uploading full results Experiment : uploading workload file -Benchmarking connection ... Latency Distribution.Average Latency (microseconds) -connection_pod ... 
-CockroachDB-1-1-1024-4-1 CockroachDB-1-1-1024-4 ... 118664 -CockroachDB-1-1-1024-3-1 CockroachDB-1-1-1024-3 ... 106931 -CockroachDB-1-1-1024-4-2 CockroachDB-1-1-1024-4 ... 119281 -CockroachDB-1-1-1024-4-3 CockroachDB-1-1-1024-4 ... 118937 -CockroachDB-1-1-1024-4-4 CockroachDB-1-1-1024-4 ... 118812 -CockroachDB-1-1-1024-2-1 CockroachDB-1-1-1024-2 ... 98010 -CockroachDB-1-1-1024-3-2 CockroachDB-1-1-1024-3 ... 106741 -CockroachDB-1-1-1024-4-5 CockroachDB-1-1-1024-4 ... 119215 -CockroachDB-1-1-1024-3-3 CockroachDB-1-1-1024-3 ... 106713 -CockroachDB-1-1-1024-1-1 CockroachDB-1-1-1024-1 ... 91853 -CockroachDB-1-1-1024-4-6 CockroachDB-1-1-1024-4 ... 119222 -CockroachDB-1-1-1024-2-2 CockroachDB-1-1-1024-2 ... 97986 -CockroachDB-1-1-1024-4-7 CockroachDB-1-1-1024-4 ... 118708 -CockroachDB-1-1-1024-3-4 CockroachDB-1-1-1024-3 ... 106869 -CockroachDB-1-1-1024-4-8 CockroachDB-1-1-1024-4 ... 118720 - -[15 rows x 36 columns] -Workflow {'CockroachDB-1-1-1024': [[8, 4, 2, 1]]} Result workflow not complete ## Show Summary @@ -1112,9 +1140,9 @@ Result workflow not complete ### Workload Benchbase Workload SF=128 (warehouses for TPC-C) Type: benchbase - Duration: 15418s - Code: 1730374413 - This includes no queries. Benchbase runs the benchmark + Duration: 15957s + Code: 1734647454 + Benchbase runs the benchmark. This experiment compares run time and resource consumption of Benchbase queries in different DBMS. Benchbase data is generated and loaded using several threads. Benchmark is 'tpcc'. Scaling factor (e.g., number of warehouses) is 128. Benchmarking runs for 60 minutes. Target is based on multiples of '1024'. Factors for benchmarking are [16]. @@ -1130,61 +1158,169 @@ Benchbase Workload SF=128 (warehouses for TPC-C) ### Connections CockroachDB-1-1-1024-1 uses docker image cockroachdb/cockroach:v24.2.4 - RAM:541008605184 + RAM:541008576512 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254912048 + disk:249215616 requests_cpu:4 requests_memory:16Gi + worker 0 + RAM:1081966526464 + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker28 + disk:469202612 + datadisk:123845016 + volume_size:1000G + volume_used:109G + worker 1 + RAM:1081751019520 + Cores:128 + host:5.15.0-126-generic + node:cl-worker29 + disk:160789996 + datadisk:123623440 + volume_size:1000G + volume_used:109G + worker 2 + RAM:1081965555712 + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:644345576 + datadisk:123626156 + volume_size:1000G + volume_used:109G CockroachDB-1-1-1024-2 uses docker image cockroachdb/cockroach:v24.2.4 - RAM:541008605184 + RAM:541008576512 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254912216 + disk:249215820 requests_cpu:4 requests_memory:16Gi + worker 0 + RAM:1081966526464 + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker28 + disk:475115332 + datadisk:129756564 + volume_size:1000G + volume_used:109G + worker 1 + RAM:1081751019520 + Cores:128 + host:5.15.0-126-generic + node:cl-worker29 + disk:166744188 + datadisk:129577516 + volume_size:1000G + volume_used:109G + worker 2 + RAM:1081965555712 + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:654361308 + datadisk:129571596 + volume_size:1000G + volume_used:109G CockroachDB-1-1-1024-3 uses docker image cockroachdb/cockroach:v24.2.4 - RAM:541008605184 + RAM:541008576512 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254912388 + disk:249216060 requests_cpu:4 requests_memory:16Gi + worker 0 + RAM:1081966526464 + Cores:256 + 
host:5.15.0-1067-nvidia + node:cl-worker28 + disk:478912840 + datadisk:133546860 + volume_size:1000G + volume_used:109G + worker 1 + RAM:1081751019520 + Cores:128 + host:5.15.0-126-generic + node:cl-worker29 + disk:170393272 + datadisk:133226492 + volume_size:1000G + volume_used:109G + worker 2 + RAM:1081965555712 + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:661848004 + datadisk:133214428 + volume_size:1000G + volume_used:109G CockroachDB-1-1-1024-4 uses docker image cockroachdb/cockroach:v24.2.4 - RAM:541008605184 + RAM:541008576512 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254912556 + disk:249216408 requests_cpu:4 requests_memory:16Gi + worker 0 + RAM:1081966526464 + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker28 + disk:481732908 + datadisk:136364000 + volume_size:1000G + volume_used:109G + worker 1 + RAM:1081751019520 + Cores:128 + host:5.15.0-126-generic + node:cl-worker29 + disk:173351696 + datadisk:136184808 + volume_size:1000G + volume_used:109G + worker 2 + RAM:1081965555712 + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:667527460 + datadisk:136153100 + volume_size:1000G + volume_used:109G ### Execution experiment_run terminals target pod_count time Throughput (requests/second) Latency Distribution.95th Percentile Latency (microseconds) Latency Distribution.Average Latency (microseconds) -CockroachDB-1-1-1024-1 1 64 16384 1 3600.0 696.70 233544.0 91853.00 -CockroachDB-1-1-1024-2 1 64 16384 2 3600.0 653.01 247449.0 97998.00 -CockroachDB-1-1-1024-3 1 64 16384 4 3600.0 599.12 258276.0 106813.50 -CockroachDB-1-1-1024-4 1 64 16384 8 3600.0 538.01 262975.0 118944.88 +CockroachDB-1-1-1024-1 1 64 16384 1 3600.0 1097.64 144074.0 58301.00 +CockroachDB-1-1-1024-2 1 64 16384 2 3600.0 1026.91 161894.0 62323.00 +CockroachDB-1-1-1024-3 1 64 16384 4 3600.0 908.92 181035.0 70443.25 +CockroachDB-1-1-1024-4 1 64 16384 8 3600.0 675.46 224333.0 94757.50 Warehouses: 128 ### Workflow #### Actual -DBMS CockroachDB-1-1-1024 - Pods [[8, 4, 2, 1]] +DBMS CockroachDB-1-1-1024 - Pods [[1, 2, 8, 4]] #### Planned DBMS CockroachDB-1-1-1024 - Pods [[1, 2, 4, 8]] ### Loading time_load terminals pods Imported warehouses [1/h] -CockroachDB-1-1-1024-1 531.0 1.0 1.0 867.79661 -CockroachDB-1-1-1024-2 531.0 1.0 2.0 867.79661 -CockroachDB-1-1-1024-3 531.0 1.0 4.0 867.79661 -CockroachDB-1-1-1024-4 531.0 1.0 8.0 867.79661 +CockroachDB-1-1-1024-1 1036.0 1.0 1.0 444.787645 +CockroachDB-1-1-1024-2 1036.0 1.0 2.0 444.787645 +CockroachDB-1-1-1024-3 1036.0 1.0 4.0 444.787645 +CockroachDB-1-1-1024-4 1036.0 1.0 8.0 444.787645 ### Tests TEST passed: Throughput (requests/second) contains no 0 or NaN diff --git a/logs_tests/doc_benchbase_cockroachdb_2_summary.txt b/logs_tests/doc_benchbase_cockroachdb_2_summary.txt index e05d773fc..78034a94d 100644 --- a/logs_tests/doc_benchbase_cockroachdb_2_summary.txt +++ b/logs_tests/doc_benchbase_cockroachdb_2_summary.txt @@ -3,9 +3,9 @@ ### Workload Benchbase Workload SF=128 (warehouses for TPC-C) Type: benchbase - Duration: 15418s - Code: 1730374413 - This includes no queries. Benchbase runs the benchmark + Duration: 15957s + Code: 1734647454 + Benchbase runs the benchmark. This experiment compares run time and resource consumption of Benchbase queries in different DBMS. Benchbase data is generated and loaded using several threads. Benchmark is 'tpcc'. Scaling factor (e.g., number of warehouses) is 128. Benchmarking runs for 60 minutes. Target is based on multiples of '1024'. Factors for benchmarking are [16]. 
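The 'Imported warehouses [1/h]' column in the Loading tables is the warehouse count extrapolated to an hourly rate; a quick sanity check against the figures in this diff:

def warehouses_per_hour(warehouses, time_load_s):
    # warehouses imported, scaled to one hour
    return warehouses * 3600 / time_load_s

assert round(warehouses_per_hour(128, 1036), 6) == 444.787645  # SF=128 run
assert round(warehouses_per_hour(16, 267), 6) == 215.730337    # SF=16 run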
@@ -21,61 +21,169 @@ Benchbase Workload SF=128 (warehouses for TPC-C) ### Connections CockroachDB-1-1-1024-1 uses docker image cockroachdb/cockroach:v24.2.4 - RAM:541008605184 + RAM:541008576512 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254912048 + disk:249215616 requests_cpu:4 requests_memory:16Gi + worker 0 + RAM:1081966526464 + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker28 + disk:469202612 + datadisk:123845016 + volume_size:1000G + volume_used:109G + worker 1 + RAM:1081751019520 + Cores:128 + host:5.15.0-126-generic + node:cl-worker29 + disk:160789996 + datadisk:123623440 + volume_size:1000G + volume_used:109G + worker 2 + RAM:1081965555712 + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:644345576 + datadisk:123626156 + volume_size:1000G + volume_used:109G CockroachDB-1-1-1024-2 uses docker image cockroachdb/cockroach:v24.2.4 - RAM:541008605184 + RAM:541008576512 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254912216 + disk:249215820 requests_cpu:4 requests_memory:16Gi + worker 0 + RAM:1081966526464 + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker28 + disk:475115332 + datadisk:129756564 + volume_size:1000G + volume_used:109G + worker 1 + RAM:1081751019520 + Cores:128 + host:5.15.0-126-generic + node:cl-worker29 + disk:166744188 + datadisk:129577516 + volume_size:1000G + volume_used:109G + worker 2 + RAM:1081965555712 + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:654361308 + datadisk:129571596 + volume_size:1000G + volume_used:109G CockroachDB-1-1-1024-3 uses docker image cockroachdb/cockroach:v24.2.4 - RAM:541008605184 + RAM:541008576512 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254912388 + disk:249216060 requests_cpu:4 requests_memory:16Gi + worker 0 + RAM:1081966526464 + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker28 + disk:478912840 + datadisk:133546860 + volume_size:1000G + volume_used:109G + worker 1 + RAM:1081751019520 + Cores:128 + host:5.15.0-126-generic + node:cl-worker29 + disk:170393272 + datadisk:133226492 + volume_size:1000G + volume_used:109G + worker 2 + RAM:1081965555712 + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:661848004 + datadisk:133214428 + volume_size:1000G + volume_used:109G CockroachDB-1-1-1024-4 uses docker image cockroachdb/cockroach:v24.2.4 - RAM:541008605184 + RAM:541008576512 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254912556 + disk:249216408 requests_cpu:4 requests_memory:16Gi + worker 0 + RAM:1081966526464 + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker28 + disk:481732908 + datadisk:136364000 + volume_size:1000G + volume_used:109G + worker 1 + RAM:1081751019520 + Cores:128 + host:5.15.0-126-generic + node:cl-worker29 + disk:173351696 + datadisk:136184808 + volume_size:1000G + volume_used:109G + worker 2 + RAM:1081965555712 + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:667527460 + datadisk:136153100 + volume_size:1000G + volume_used:109G ### Execution experiment_run terminals target pod_count time Throughput (requests/second) Latency Distribution.95th Percentile Latency (microseconds) Latency Distribution.Average Latency (microseconds) -CockroachDB-1-1-1024-1 1 64 16384 1 3600.0 696.70 233544.0 91853.00 -CockroachDB-1-1-1024-2 1 64 16384 2 3600.0 653.01 247449.0 97998.00 -CockroachDB-1-1-1024-3 1 64 16384 4 3600.0 599.12 258276.0 106813.50 -CockroachDB-1-1-1024-4 1 64 16384 8 3600.0 538.01 
262975.0 118944.88 +CockroachDB-1-1-1024-1 1 64 16384 1 3600.0 1097.64 144074.0 58301.00 +CockroachDB-1-1-1024-2 1 64 16384 2 3600.0 1026.91 161894.0 62323.00 +CockroachDB-1-1-1024-3 1 64 16384 4 3600.0 908.92 181035.0 70443.25 +CockroachDB-1-1-1024-4 1 64 16384 8 3600.0 675.46 224333.0 94757.50 Warehouses: 128 ### Workflow #### Actual -DBMS CockroachDB-1-1-1024 - Pods [[8, 4, 2, 1]] +DBMS CockroachDB-1-1-1024 - Pods [[1, 2, 8, 4]] #### Planned DBMS CockroachDB-1-1-1024 - Pods [[1, 2, 4, 8]] ### Loading time_load terminals pods Imported warehouses [1/h] -CockroachDB-1-1-1024-1 531.0 1.0 1.0 867.79661 -CockroachDB-1-1-1024-2 531.0 1.0 2.0 867.79661 -CockroachDB-1-1-1024-3 531.0 1.0 4.0 867.79661 -CockroachDB-1-1-1024-4 531.0 1.0 8.0 867.79661 +CockroachDB-1-1-1024-1 1036.0 1.0 1.0 444.787645 +CockroachDB-1-1-1024-2 1036.0 1.0 2.0 444.787645 +CockroachDB-1-1-1024-3 1036.0 1.0 4.0 444.787645 +CockroachDB-1-1-1024-4 1036.0 1.0 8.0 444.787645 ### Tests TEST passed: Throughput (requests/second) contains no 0 or NaN diff --git a/logs_tests/doc_benchbase_databaseservice_1.log b/logs_tests/doc_benchbase_databaseservice_1.log new file mode 100644 index 000000000..5951b5621 --- /dev/null +++ b/logs_tests/doc_benchbase_databaseservice_1.log @@ -0,0 +1,187 @@ +Data Directory : is running +Result Directory : is running +Dashboard : is running +Message Queue : is running +Experiment : has code 1734664810 +Experiment : starts at 2024-12-20 04:20:09.627530 (4349611.034664394) +Experiment : This experiment compares run time and resource consumption of Benchbase queries in different DBMS. +Benchbase data is generated and loaded using several threads. +Benchmark is 'tpcc'. Scaling factor (e.g., number of warehouses) is 16. Benchmarking runs for 5 minutes. Target is based on multiples of '1024'. Factors for benchmarking are [16]. +Benchmark is limited to DBMS ['DatabaseService']. +Import is handled by 1 processes (pods). +Loading is fixed to cl-worker19. +Benchmarking is fixed to cl-worker19. +SUT is fixed to cl-worker11. +Loading is tested with [1] threads, split into [1] pods. +Benchmarking is tested with [16] threads, split into [1, 2] pods. +Benchmarking is run as [1] times the number of benchmarking pods. +Experiment is run once. 
+Cluster monitoring : is running +done +DatabaseService-1-1-1024 : will start now +- waiting 30s - : done +DatabaseService-1-1-1024 : waits for health check to succeed +- waiting 30s - : done +DatabaseService-1-1-1024 : waits for health check to succeed +- waiting 30s - : done +DatabaseService-1-1-1024 : waits for health check to succeed +- waiting 30s - : done +DatabaseService-1-1-1024 : is not loaded yet +DatabaseService-1-1-1024 : will start loading but not before 2024-12-20 03:23:42 (that is in 60 secs) +- waiting 30s - : done +DatabaseService-1-1-1024 : is not loaded yet +DatabaseService-1-1-1024 : will start loading but not before 2024-12-20 03:23:42 +done +DatabaseService-1-1-1024 : is not loaded yet +DatabaseService-1-1-1024 : start asynch loading scripts of type loaded +DatabaseService-1-1-1024 : is loading +- waiting 30s - : done +DatabaseService-1-1-1024 : is loading +- waiting 30s - : done +DatabaseService-1-1-1024 : is loading +- waiting 30s - : done +DatabaseService-1-1-1024 : is loading +- waiting 30s - : done +DatabaseService-1-1-1024 : is loading +- waiting 30s - : done +DatabaseService-1-1-1024 : is loading +done +DatabaseService-1-1-1024 : showing loader times +DatabaseService-1-1-1024 : loader timespan (first to last [s]) = 150 +DatabaseService-1-1-1024 : benchmarks done 0 of 1. This will be client 1 +DatabaseService-1-1-1024 : we will change parameters of benchmark as {'PARALLEL': '1', 'SF': '16', 'BENCHBASE_BENCH': 'tpcc', 'BENCHBASE_PROFILE': 'postgres', 'BEXHOMA_DATABASE': 'postgres', 'BEXHOMA_HOST': 'bexhoma-service.perdelt.svc.cluster.local', 'BENCHBASE_TARGET': 16384, 'BENCHBASE_TERMINALS': 16, 'BENCHBASE_TIME': 300, 'BENCHBASE_ISOLATION': 'TRANSACTION_READ_COMMITTED'} +DatabaseService-1-1-1024-1 : start benchmarking +Worker pods found: [] +DatabaseService-1-1-1024 : benchmarking results in folder /home/perdelt/benchmarks/1734664810 +- waiting 10s - : done +DatabaseService-1-1-1024 : benchmarking is waiting for job bexhoma-benchmarker-databaseservice-1-1-1024-1734664810-1-qwfcv: found +- waiting 30s - : done +DatabaseService-1-1-1024 : has running benchmarks +- waiting 30s - : done +DatabaseService-1-1-1024 : has running benchmarks +- waiting 30s - : done +DatabaseService-1-1-1024 : has running benchmarks +- waiting 30s - : done +DatabaseService-1-1-1024 : has running benchmarks +- waiting 30s - : done +DatabaseService-1-1-1024 : has running benchmarks +- waiting 30s - : done +DatabaseService-1-1-1024 : has running benchmarks +- waiting 30s - : done +DatabaseService-1-1-1024 : has running benchmarks +- waiting 30s - : done +DatabaseService-1-1-1024 : has running benchmarks +- waiting 30s - : done +DatabaseService-1-1-1024 : has running benchmarks +done +DatabaseService-1-1-1024 : has running benchmarks +DatabaseService-1-1-1024-1 : showing benchmarker times +DatabaseService-1-1-1024-1 : benchmarker timespan (start to end single container [s]) = 313 +DatabaseService-1-1-1024-1 : benchmarker times (start/end per pod and container) = [] +DatabaseService-1-1-1024-1 : found and updated times {'benchmarker': []} +done +DatabaseService-1-1-1024 : benchmarks done 0 of 1. 
This will be client 2 +DatabaseService-1-1-1024 : we will change parameters of benchmark as {'PARALLEL': '2', 'SF': '16', 'BENCHBASE_BENCH': 'tpcc', 'BENCHBASE_PROFILE': 'postgres', 'BEXHOMA_DATABASE': 'postgres', 'BEXHOMA_HOST': 'bexhoma-service.perdelt.svc.cluster.local', 'BENCHBASE_TARGET': 8192, 'BENCHBASE_TERMINALS': 8, 'BENCHBASE_TIME': 300, 'BENCHBASE_ISOLATION': 'TRANSACTION_READ_COMMITTED'} +DatabaseService-1-1-1024-2 : start benchmarking +Worker pods found: [] +DatabaseService-1-1-1024 : benchmarking results in folder /home/perdelt/benchmarks/1734664810 +- waiting 10s - : done +DatabaseService-1-1-1024 : benchmarking is waiting for job bexhoma-benchmarker-databaseservice-1-1-1024-1734664810-1-dbz5k: found +- waiting 30s - : done +DatabaseService-1-1-1024 : has running benchmarks +- waiting 30s - : done +DatabaseService-1-1-1024 : has running benchmarks +- waiting 30s - : done +DatabaseService-1-1-1024 : has running benchmarks +- waiting 30s - : done +DatabaseService-1-1-1024 : has running benchmarks +- waiting 30s - : done +DatabaseService-1-1-1024 : has running benchmarks +- waiting 30s - : done +DatabaseService-1-1-1024 : has running benchmarks +- waiting 30s - : done +DatabaseService-1-1-1024 : has running benchmarks +- waiting 30s - : done +DatabaseService-1-1-1024 : has running benchmarks +- waiting 30s - : done +DatabaseService-1-1-1024 : has running benchmarks +done +DatabaseService-1-1-1024 : has running benchmarks +DatabaseService-1-1-1024-2 : showing benchmarker times +DatabaseService-1-1-1024-2 : benchmarker timespan (start to end single container [s]) = 312 +DatabaseService-1-1-1024-2 : benchmarker times (start/end per pod and container) = [] +DatabaseService-1-1-1024-2 : found and updated times {'benchmarker': []} +done +DatabaseService-1-1-1024 : can be stopped +Experiment ends at 2024-12-20 04:38:19.697126 (4350701.104259551): 1090.0695951562375s total +Experiment : downloading partial results +Experiment : uploading full results +Experiment : uploading workload file +Result workflow complete + +## Show Summary + +### Workload +Benchbase Workload SF=16 (warehouses for TPC-C) + Type: benchbase + Duration: 1091s + Code: 1734664810 + Benchbase runs the benchmark. + This experiment compares run time and resource consumption of Benchbase queries in different DBMS. + Benchbase data is generated and loaded using several threads. + Benchmark is 'tpcc'. Scaling factor (e.g., number of warehouses) is 16. Benchmarking runs for 5 minutes. Target is based on multiples of '1024'. Factors for benchmarking are [16]. + Benchmark is limited to DBMS ['DatabaseService']. + Import is handled by 1 processes (pods). + Loading is fixed to cl-worker19. + Benchmarking is fixed to cl-worker19. + SUT is fixed to cl-worker11. + Loading is tested with [1] threads, split into [1] pods. + Benchmarking is tested with [16] threads, split into [1, 2] pods. + Benchmarking is run as [1] times the number of benchmarking pods. + Experiment is run once. 
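The Connections section that follows reports RAM in raw bytes. Converting the value reported for node cl-worker11 for readability:

```python
ram_bytes = 541008576512               # RAM reported for node cl-worker11
print(f"{ram_bytes / 2**30:.1f} GiB")  # 503.9 GiB
```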
+ +### Connections +DatabaseService-1-1-1024-1 uses docker image postgres:16.1 + RAM:541008576512 + CPU:AMD Opteron(tm) Processor 6378 + Cores:64 + host:5.15.0-126-generic + node:cl-worker11 + disk:249256016 + datadisk:39348 + requests_cpu:4 + requests_memory:16Gi +DatabaseService-1-1-1024-2 uses docker image postgres:16.1 + RAM:541008576512 + CPU:AMD Opteron(tm) Processor 6378 + Cores:64 + host:5.15.0-126-generic + node:cl-worker11 + disk:249256020 + datadisk:39348 + requests_cpu:4 + requests_memory:16Gi + +### Execution + experiment_run terminals target pod_count time Throughput (requests/second) Latency Distribution.95th Percentile Latency (microseconds) Latency Distribution.Average Latency (microseconds) +DatabaseService-1-1-1024-1 1 16 16384 1 300.0 1873.16 19246.0 8535.0 +DatabaseService-1-1-1024-2 1 16 16384 2 300.0 1820.81 21236.0 8782.5 + +Warehouses: 16 + +### Workflow + +#### Actual +DBMS DatabaseService-1-1-1024 - Pods [[1, 2]] + +#### Planned +DBMS DatabaseService-1-1-1024 - Pods [[1, 2]] + +### Loading + time_load terminals pods Imported warehouses [1/h] +DatabaseService-1-1-1024-1 150.0 1.0 1.0 384.0 +DatabaseService-1-1-1024-2 150.0 1.0 2.0 384.0 + +### Tests +TEST passed: Throughput (requests/second) contains no 0 or NaN +TEST passed: Workflow as planned diff --git a/logs_tests/doc_benchbase_databaseservice_1_summary.txt b/logs_tests/doc_benchbase_databaseservice_1_summary.txt new file mode 100644 index 000000000..49f3333c8 --- /dev/null +++ b/logs_tests/doc_benchbase_databaseservice_1_summary.txt @@ -0,0 +1,66 @@ +## Show Summary + +### Workload +Benchbase Workload SF=16 (warehouses for TPC-C) + Type: benchbase + Duration: 1091s + Code: 1734664810 + Benchbase runs the benchmark. + This experiment compares run time and resource consumption of Benchbase queries in different DBMS. + Benchbase data is generated and loaded using several threads. + Benchmark is 'tpcc'. Scaling factor (e.g., number of warehouses) is 16. Benchmarking runs for 5 minutes. Target is based on multiples of '1024'. Factors for benchmarking are [16]. + Benchmark is limited to DBMS ['DatabaseService']. + Import is handled by 1 processes (pods). + Loading is fixed to cl-worker19. + Benchmarking is fixed to cl-worker19. + SUT is fixed to cl-worker11. + Loading is tested with [1] threads, split into [1] pods. + Benchmarking is tested with [16] threads, split into [1, 2] pods. + Benchmarking is run as [1] times the number of benchmarking pods. + Experiment is run once. 
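The "Imported warehouses [1/h]" column in the Loading tables of this log and its summary is the load rate extrapolated to one hour: warehouses divided by the loader timespan in hours.

```python
warehouses = 16    # scale factor of this workload
time_load = 150.0  # loader timespan in seconds
print(warehouses * 3600 / time_load)  # 384.0, as in the table
```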
+ +### Connections +DatabaseService-1-1-1024-1 uses docker image postgres:16.1 + RAM:541008576512 + CPU:AMD Opteron(tm) Processor 6378 + Cores:64 + host:5.15.0-126-generic + node:cl-worker11 + disk:249256016 + datadisk:39348 + requests_cpu:4 + requests_memory:16Gi +DatabaseService-1-1-1024-2 uses docker image postgres:16.1 + RAM:541008576512 + CPU:AMD Opteron(tm) Processor 6378 + Cores:64 + host:5.15.0-126-generic + node:cl-worker11 + disk:249256020 + datadisk:39348 + requests_cpu:4 + requests_memory:16Gi + +### Execution + experiment_run terminals target pod_count time Throughput (requests/second) Latency Distribution.95th Percentile Latency (microseconds) Latency Distribution.Average Latency (microseconds) +DatabaseService-1-1-1024-1 1 16 16384 1 300.0 1873.16 19246.0 8535.0 +DatabaseService-1-1-1024-2 1 16 16384 2 300.0 1820.81 21236.0 8782.5 + +Warehouses: 16 + +### Workflow + +#### Actual +DBMS DatabaseService-1-1-1024 - Pods [[1, 2]] + +#### Planned +DBMS DatabaseService-1-1-1024 - Pods [[1, 2]] + +### Loading + time_load terminals pods Imported warehouses [1/h] +DatabaseService-1-1-1024-1 150.0 1.0 1.0 384.0 +DatabaseService-1-1-1024-2 150.0 1.0 2.0 384.0 + +### Tests +TEST passed: Throughput (requests/second) contains no 0 or NaN +TEST passed: Workflow as planned diff --git a/logs_tests/doc_benchbase_databaseservice_2.log b/logs_tests/doc_benchbase_databaseservice_2.log new file mode 100644 index 000000000..dde3c5402 --- /dev/null +++ b/logs_tests/doc_benchbase_databaseservice_2.log @@ -0,0 +1,165 @@ +Data Directory : is running +Result Directory : is running +Dashboard : is running +Message Queue : is running +Experiment : has code 1734665950 +Experiment : starts at 2024-12-20 04:39:09.834487 (4350751.241621306) +Experiment : This experiment compares run time and resource consumption of Benchbase queries in different DBMS. +Benchbase data is generated and loaded using several threads. +Benchmark is 'tpcc'. Scaling factor (e.g., number of warehouses) is 16. Benchmarking runs for 5 minutes. Target is based on multiples of '1024'. Factors for benchmarking are [16]. +Benchmark is limited to DBMS ['DatabaseService']. +Import is handled by 1 processes (pods). +Loading is fixed to cl-worker19. +Benchmarking is fixed to cl-worker19. +SUT is fixed to cl-worker11. +Loading is skipped. +Loading is tested with [1] threads, split into [1] pods. +Benchmarking is tested with [16] threads, split into [1, 2] pods. +Benchmarking is run as [1] times the number of benchmarking pods. +Experiment is run once. +Cluster monitoring : is running +done +DatabaseService-1-1-1024 : will start now +- waiting 30s - : done +DatabaseService-1-1-1024 : will start benchmarking but not before 2024-12-20 03:41:10 (that is in 60 secs) +- waiting 30s - : done +DatabaseService-1-1-1024 : will start benchmarking but not before 2024-12-20 03:41:10 +done +DatabaseService-1-1-1024 : benchmarks done 0 of 1. 
This will be client 1 +DatabaseService-1-1-1024 : we will change parameters of benchmark as {'PARALLEL': '1', 'SF': '16', 'BENCHBASE_BENCH': 'tpcc', 'BENCHBASE_PROFILE': 'postgres', 'BEXHOMA_DATABASE': 'postgres', 'BEXHOMA_HOST': 'bexhoma-service.perdelt.svc.cluster.local', 'BENCHBASE_TARGET': 16384, 'BENCHBASE_TERMINALS': 16, 'BENCHBASE_TIME': 300, 'BENCHBASE_ISOLATION': 'TRANSACTION_READ_COMMITTED'} +DatabaseService-1-1-1024-1 : start benchmarking +Worker pods found: [] +DatabaseService-1-1-1024 : benchmarking results in folder /home/perdelt/benchmarks/1734665950 +- waiting 10s - : done +DatabaseService-1-1-1024 : benchmarking is waiting for job bexhoma-benchmarker-databaseservice-1-1-1024-1734665950-1-ckwjp: found +- waiting 30s - : done +DatabaseService-1-1-1024 : has running benchmarks +- waiting 30s - : done +DatabaseService-1-1-1024 : has running benchmarks +- waiting 30s - : done +DatabaseService-1-1-1024 : has running benchmarks +- waiting 30s - : done +DatabaseService-1-1-1024 : has running benchmarks +- waiting 30s - : done +DatabaseService-1-1-1024 : has running benchmarks +- waiting 30s - : done +DatabaseService-1-1-1024 : has running benchmarks +- waiting 30s - : done +DatabaseService-1-1-1024 : has running benchmarks +- waiting 30s - : done +DatabaseService-1-1-1024 : has running benchmarks +- waiting 30s - : done +DatabaseService-1-1-1024 : has running benchmarks +done +DatabaseService-1-1-1024 : has running benchmarks +DatabaseService-1-1-1024-1 : showing benchmarker times +DatabaseService-1-1-1024-1 : benchmarker timespan (start to end single container [s]) = 312 +DatabaseService-1-1-1024-1 : benchmarker times (start/end per pod and container) = [] +DatabaseService-1-1-1024-1 : found and updated times {'benchmarker': []} +done +DatabaseService-1-1-1024 : benchmarks done 0 of 1. 
This will be client 2 +DatabaseService-1-1-1024 : we will change parameters of benchmark as {'PARALLEL': '2', 'SF': '16', 'BENCHBASE_BENCH': 'tpcc', 'BENCHBASE_PROFILE': 'postgres', 'BEXHOMA_DATABASE': 'postgres', 'BEXHOMA_HOST': 'bexhoma-service.perdelt.svc.cluster.local', 'BENCHBASE_TARGET': 8192, 'BENCHBASE_TERMINALS': 8, 'BENCHBASE_TIME': 300, 'BENCHBASE_ISOLATION': 'TRANSACTION_READ_COMMITTED'} +DatabaseService-1-1-1024-2 : start benchmarking +Worker pods found: [] +DatabaseService-1-1-1024 : benchmarking results in folder /home/perdelt/benchmarks/1734665950 +- waiting 10s - : done +DatabaseService-1-1-1024 : benchmarking is waiting for job bexhoma-benchmarker-databaseservice-1-1-1024-1734665950-1-47jf6: found +- waiting 30s - : done +DatabaseService-1-1-1024 : has running benchmarks +- waiting 30s - : done +DatabaseService-1-1-1024 : has running benchmarks +- waiting 30s - : done +DatabaseService-1-1-1024 : has running benchmarks +- waiting 30s - : done +DatabaseService-1-1-1024 : has running benchmarks +- waiting 30s - : done +DatabaseService-1-1-1024 : has running benchmarks +- waiting 30s - : done +DatabaseService-1-1-1024 : has running benchmarks +- waiting 30s - : done +DatabaseService-1-1-1024 : has running benchmarks +- waiting 30s - : done +DatabaseService-1-1-1024 : has running benchmarks +- waiting 30s - : done +DatabaseService-1-1-1024 : has running benchmarks +done +DatabaseService-1-1-1024 : has running benchmarks +DatabaseService-1-1-1024-2 : showing benchmarker times +DatabaseService-1-1-1024-2 : benchmarker timespan (start to end single container [s]) = 313 +DatabaseService-1-1-1024-2 : benchmarker times (start/end per pod and container) = [] +DatabaseService-1-1-1024-2 : found and updated times {'benchmarker': []} +done +DatabaseService-1-1-1024 : can be stopped +Experiment ends at 2024-12-20 04:52:43.009699 (4351564.416832483): 813.1752111772075s total +Experiment : downloading partial results +Experiment : uploading full results +Experiment : uploading workload file +Result workflow complete + +## Show Summary + +### Workload +Benchbase Workload SF=16 (warehouses for TPC-C) + Type: benchbase + Duration: 814s + Code: 1734665950 + Benchbase runs the benchmark. + This experiment compares run time and resource consumption of Benchbase queries in different DBMS. + Benchbase data is generated and loaded using several threads. + Benchmark is 'tpcc'. Scaling factor (e.g., number of warehouses) is 16. Benchmarking runs for 5 minutes. Target is based on multiples of '1024'. Factors for benchmarking are [16]. + Benchmark is limited to DBMS ['DatabaseService']. + Import is handled by 1 processes (pods). + Loading is fixed to cl-worker19. + Benchmarking is fixed to cl-worker19. + SUT is fixed to cl-worker11. + Loading is skipped. + Loading is tested with [1] threads, split into [1] pods. + Benchmarking is tested with [16] threads, split into [1, 2] pods. + Benchmarking is run as [1] times the number of benchmarking pods. + Experiment is run once. 
+ +### Connections +DatabaseService-1-1-1024-1 uses docker image postgres:16.1 + RAM:541008576512 + CPU:AMD Opteron(tm) Processor 6378 + Cores:64 + host:5.15.0-126-generic + node:cl-worker11 + disk:249256028 + datadisk:39348 + requests_cpu:4 + requests_memory:16Gi +DatabaseService-1-1-1024-2 uses docker image postgres:16.1 + RAM:541008576512 + CPU:AMD Opteron(tm) Processor 6378 + Cores:64 + host:5.15.0-126-generic + node:cl-worker11 + disk:249256028 + datadisk:39348 + requests_cpu:4 + requests_memory:16Gi + +### Execution + experiment_run terminals target pod_count time Throughput (requests/second) Latency Distribution.95th Percentile Latency (microseconds) Latency Distribution.Average Latency (microseconds) +DatabaseService-1-1-1024-1 1 16 16384 1 300.0 1948.43 18329.0 8206.0 +DatabaseService-1-1-1024-2 1 16 16384 2 300.0 1774.67 21116.0 9008.5 + +Warehouses: 16 + +### Workflow + +#### Actual +DBMS DatabaseService-1-1-1024 - Pods [[1, 2]] + +#### Planned +DBMS DatabaseService-1-1-1024 - Pods [[1, 2]] + +### Loading + time_load terminals pods Imported warehouses [1/h] +DatabaseService-1-1-1024-1 0 1 1 inf +DatabaseService-1-1-1024-2 0 1 2 inf + +### Tests +TEST passed: Throughput (requests/second) contains no 0 or NaN +TEST passed: Workflow as planned diff --git a/logs_tests/doc_benchbase_databaseservice_2_summary.txt b/logs_tests/doc_benchbase_databaseservice_2_summary.txt new file mode 100644 index 000000000..64ebd43ef --- /dev/null +++ b/logs_tests/doc_benchbase_databaseservice_2_summary.txt @@ -0,0 +1,67 @@ +## Show Summary + +### Workload +Benchbase Workload SF=16 (warehouses for TPC-C) + Type: benchbase + Duration: 814s + Code: 1734665950 + Benchbase runs the benchmark. + This experiment compares run time and resource consumption of Benchbase queries in different DBMS. + Benchbase data is generated and loaded using several threads. + Benchmark is 'tpcc'. Scaling factor (e.g., number of warehouses) is 16. Benchmarking runs for 5 minutes. Target is based on multiples of '1024'. Factors for benchmarking are [16]. + Benchmark is limited to DBMS ['DatabaseService']. + Import is handled by 1 processes (pods). + Loading is fixed to cl-worker19. + Benchmarking is fixed to cl-worker19. + SUT is fixed to cl-worker11. + Loading is skipped. + Loading is tested with [1] threads, split into [1] pods. + Benchmarking is tested with [16] threads, split into [1, 2] pods. + Benchmarking is run as [1] times the number of benchmarking pods. + Experiment is run once. 
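When loading is skipped, no loader timespan is recorded (time_load = 0), so the same warehouses-per-hour computation degenerates: float division of a nonzero numerator by zero yields infinity in numpy/pandas rather than raising, which matches the `inf` entries in the Loading tables of this pair of files. A sketch, assuming numpy-style division semantics:

```python
import numpy as np

warehouses, time_load = 16, 0.0  # loading skipped, so no load time was measured
with np.errstate(divide='ignore'):
    print(np.float64(warehouses * 3600) / time_load)  # inf
```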
+ +### Connections +DatabaseService-1-1-1024-1 uses docker image postgres:16.1 + RAM:541008576512 + CPU:AMD Opteron(tm) Processor 6378 + Cores:64 + host:5.15.0-126-generic + node:cl-worker11 + disk:249256028 + datadisk:39348 + requests_cpu:4 + requests_memory:16Gi +DatabaseService-1-1-1024-2 uses docker image postgres:16.1 + RAM:541008576512 + CPU:AMD Opteron(tm) Processor 6378 + Cores:64 + host:5.15.0-126-generic + node:cl-worker11 + disk:249256028 + datadisk:39348 + requests_cpu:4 + requests_memory:16Gi + +### Execution + experiment_run terminals target pod_count time Throughput (requests/second) Latency Distribution.95th Percentile Latency (microseconds) Latency Distribution.Average Latency (microseconds) +DatabaseService-1-1-1024-1 1 16 16384 1 300.0 1948.43 18329.0 8206.0 +DatabaseService-1-1-1024-2 1 16 16384 2 300.0 1774.67 21116.0 9008.5 + +Warehouses: 16 + +### Workflow + +#### Actual +DBMS DatabaseService-1-1-1024 - Pods [[1, 2]] + +#### Planned +DBMS DatabaseService-1-1-1024 - Pods [[1, 2]] + +### Loading + time_load terminals pods Imported warehouses [1/h] +DatabaseService-1-1-1024-1 0 1 1 inf +DatabaseService-1-1-1024-2 0 1 2 inf + +### Tests +TEST passed: Throughput (requests/second) contains no 0 or NaN +TEST passed: Workflow as planned diff --git a/logs_tests/doc_benchbase_yugabytedb_1.log b/logs_tests/doc_benchbase_yugabytedb_1.log index 376b835cf..ad3e7b589 100644 --- a/logs_tests/doc_benchbase_yugabytedb_1.log +++ b/logs_tests/doc_benchbase_yugabytedb_1.log @@ -2,8 +2,8 @@ Data Directory : is running Result Directory : is running Dashboard : is running Message Queue : is running -Experiment : has code 1730223936 -Experiment : starts at 2024-10-29 18:45:36.513869 (4959876.637850233) +Experiment : has code 1734627587 +Experiment : starts at 2024-12-19 17:59:46.587090 (4312387.994224559) Experiment : This experiment compares run time and resource consumption of Benchbase queries in different DBMS. Benchbase data is generated and loaded using several threads. Benchmark is 'tpcc'. Scaling factor (e.g., number of warehouses) is 16. Benchmarking runs for 5 minutes. Target is based on multiples of '1024'. Factors for benchmarking are [16]. @@ -21,10 +21,10 @@ done YugabyteDB-1-1-1024 : will start now - waiting 30s - : done YugabyteDB-1-1-1024 : is not loaded yet -YugabyteDB-1-1-1024 : will start loading but not before 2024-10-29 17:47:37 (that is in 60 secs) +YugabyteDB-1-1-1024 : will start loading but not before 2024-12-19 17:01:47 (that is in 60 secs) - waiting 30s - : done YugabyteDB-1-1-1024 : is not loaded yet -YugabyteDB-1-1-1024 : will start loading but not before 2024-10-29 17:47:37 +YugabyteDB-1-1-1024 : will start loading but not before 2024-12-19 17:01:47 done YugabyteDB-1-1-1024 : is not loaded yet YugabyteDB-1-1-1024 : start asynch loading scripts of type loaded @@ -41,15 +41,20 @@ YugabyteDB-1-1-1024 : is loading YugabyteDB-1-1-1024 : is loading - waiting 30s - : done YugabyteDB-1-1-1024 : is loading +- waiting 30s - : done +YugabyteDB-1-1-1024 : is loading done YugabyteDB-1-1-1024 : showing loader times -YugabyteDB-1-1-1024 : loader timespan (first to last [s]) = 200 +YugabyteDB-1-1-1024 : loader timespan (first to last [s]) = 216 YugabyteDB-1-1-1024 : benchmarks done 0 of 1. 
This will be client 1 -YugabyteDB-1-1-1024 : we will change parameters of benchmark as {'PARALLEL': '1', 'SF': '16', 'BENCHBASE_BENCH': 'tpcc', 'BENCHBASE_PROFILE': 'postgres', 'BEXHOMA_DATABASE': 'yugabyte', 'BENCHBASE_TARGET': 16384, 'BENCHBASE_TERMINALS': 16, 'BENCHBASE_TIME': 300, 'BENCHBASE_ISOLATION': 'TRANSACTION_READ_COMMITTED', 'BEXHOMA_PORT': 5433} +YugabyteDB-1-1-1024 : we will change parameters of benchmark as {'PARALLEL': '1', 'SF': '16', 'BENCHBASE_BENCH': 'tpcc', 'BENCHBASE_PROFILE': 'postgres', 'BEXHOMA_DATABASE': 'yugabyte', 'BENCHBASE_TARGET': 16384, 'BENCHBASE_TERMINALS': 16, 'BENCHBASE_TIME': 300, 'BENCHBASE_ISOLATION': 'TRANSACTION_READ_COMMITTED', 'BEXHOMA_USER': 'yugabyte', 'BEXHOMA_PASSWORD': '', 'BEXHOMA_PORT': 5433} YugabyteDB-1-1-1024-1 : start benchmarking -YugabyteDB-1-1-1024 : benchmarking results in folder /home/perdelt/benchmarks/1730223936 +YugabyteDB-1-1-1024 : distributed system - get host info for worker yb-tserver-0 +YugabyteDB-1-1-1024 : distributed system - get host info for worker yb-tserver-1 +YugabyteDB-1-1-1024 : distributed system - get host info for worker yb-tserver-2 +YugabyteDB-1-1-1024 : benchmarking results in folder /home/perdelt/benchmarks/1734627587 - waiting 10s - : done -YugabyteDB-1-1-1024 : benchmarking is waiting for job bexhoma-benchmarker-yugabytedb-1-1-1024-1730223936-1-1-wfs2w: found +YugabyteDB-1-1-1024 : benchmarking is waiting for job bexhoma-benchmarker-yugabytedb-1-1-1024-1734627587-1-1-xxtnv: found - waiting 30s - : done YugabyteDB-1-1-1024 : has running benchmarks - waiting 30s - : done @@ -71,16 +76,19 @@ YugabyteDB-1-1-1024 : has running benchmarks done YugabyteDB-1-1-1024 : has running benchmarks YugabyteDB-1-1-1024-1 : showing benchmarker times -YugabyteDB-1-1-1024-1 : benchmarker timespan (start to end single container [s]) = 312 -YugabyteDB-1-1-1024-1 : benchmarker times (start/end per pod and container) = [(1730224273, 1730224578)] -YugabyteDB-1-1-1024-1 : found and updated times {'benchmarker': [(1730224273, 1730224578)]} +YugabyteDB-1-1-1024-1 : benchmarker timespan (start to end single container [s]) = 313 +YugabyteDB-1-1-1024-1 : benchmarker times (start/end per pod and container) = [(1734627958, 1734628262)] +YugabyteDB-1-1-1024-1 : found and updated times {'benchmarker': [(1734627958, 1734628262)]} done YugabyteDB-1-1-1024 : benchmarks done 0 of 1. 
This will be client 2 -YugabyteDB-1-1-1024 : we will change parameters of benchmark as {'PARALLEL': '2', 'SF': '16', 'BENCHBASE_BENCH': 'tpcc', 'BENCHBASE_PROFILE': 'postgres', 'BEXHOMA_DATABASE': 'yugabyte', 'BENCHBASE_TARGET': 8192, 'BENCHBASE_TERMINALS': 8, 'BENCHBASE_TIME': 300, 'BENCHBASE_ISOLATION': 'TRANSACTION_READ_COMMITTED', 'BEXHOMA_PORT': 5433} +YugabyteDB-1-1-1024 : we will change parameters of benchmark as {'PARALLEL': '2', 'SF': '16', 'BENCHBASE_BENCH': 'tpcc', 'BENCHBASE_PROFILE': 'postgres', 'BEXHOMA_DATABASE': 'yugabyte', 'BENCHBASE_TARGET': 8192, 'BENCHBASE_TERMINALS': 8, 'BENCHBASE_TIME': 300, 'BENCHBASE_ISOLATION': 'TRANSACTION_READ_COMMITTED', 'BEXHOMA_USER': 'yugabyte', 'BEXHOMA_PASSWORD': '', 'BEXHOMA_PORT': 5433} YugabyteDB-1-1-1024-2 : start benchmarking -YugabyteDB-1-1-1024 : benchmarking results in folder /home/perdelt/benchmarks/1730223936 +YugabyteDB-1-1-1024 : distributed system - get host info for worker yb-tserver-0 +YugabyteDB-1-1-1024 : distributed system - get host info for worker yb-tserver-1 +YugabyteDB-1-1-1024 : distributed system - get host info for worker yb-tserver-2 +YugabyteDB-1-1-1024 : benchmarking results in folder /home/perdelt/benchmarks/1734627587 - waiting 10s - : done -YugabyteDB-1-1-1024 : benchmarking is waiting for job bexhoma-benchmarker-yugabytedb-1-1-1024-1730223936-1-2-jmmx8: found +YugabyteDB-1-1-1024 : benchmarking is waiting for job bexhoma-benchmarker-yugabytedb-1-1-1024-1734627587-1-2-p4jz7: found - waiting 30s - : done YugabyteDB-1-1-1024 : has running benchmarks - waiting 30s - : done @@ -102,33 +110,25 @@ YugabyteDB-1-1-1024 : has running benchmarks done YugabyteDB-1-1-1024 : has running benchmarks YugabyteDB-1-1-1024-2 : showing benchmarker times -YugabyteDB-1-1-1024-2 : benchmarker timespan (start to end single container [s]) = 313 -YugabyteDB-1-1-1024-2 : benchmarker times (start/end per pod and container) = [(1730224620, 1730224925), (1730224620, 1730224925)] -YugabyteDB-1-1-1024-2 : found and updated times {'benchmarker': [(1730224620, 1730224925), (1730224620, 1730224925)]} +YugabyteDB-1-1-1024-2 : benchmarker timespan (start to end single container [s]) = 314 +YugabyteDB-1-1-1024-2 : benchmarker times (start/end per pod and container) = [(1734628310, 1734628613), (1734628310, 1734628613)] +YugabyteDB-1-1-1024-2 : found and updated times {'benchmarker': [(1734628310, 1734628613), (1734628310, 1734628613)]} done YugabyteDB-1-1-1024 : can be stopped -Experiment ends at 2024-10-29 19:02:42.007475 (4960902.131454072): 1025.4936038386077s total +Experiment ends at 2024-12-19 18:17:33.397073 (4313454.804205057): 1066.8099804986268s total Experiment : downloading partial results Experiment : uploading full results Experiment : uploading workload file -Benchmarking connection ... Latency Distribution.Average Latency (microseconds) -connection_pod ... -YugabyteDB-1-1-1024-1-1 YugabyteDB-1-1-1024-1 ... 40433 -YugabyteDB-1-1-1024-2-1 YugabyteDB-1-1-1024-2 ... 45980 -YugabyteDB-1-1-1024-2-2 YugabyteDB-1-1-1024-2 ... 46247 - -[3 rows x 36 columns] -Workflow {'YugabyteDB-1-1-1024': [[1, 2]]} -Result workflow complete +Result workflow not complete ## Show Summary ### Workload Benchbase Workload SF=16 (warehouses for TPC-C) Type: benchbase - Duration: 1026s - Code: 1730223936 - This includes no queries. Benchbase runs the benchmark + Duration: 1067s + Code: 1734627587 + Benchbase runs the benchmark. This experiment compares run time and resource consumption of Benchbase queries in different DBMS. 
Benchbase data is generated and loaded using several threads. Benchmark is 'tpcc'. Scaling factor (e.g., number of warehouses) is 16. Benchmarking runs for 5 minutes. Target is based on multiples of '1024'. Factors for benchmarking are [16]. @@ -144,45 +144,87 @@ Benchbase Workload SF=16 (warehouses for TPC-C) ### Connections YugabyteDB-1-1-1024-1 uses docker image postgres:15.0 - RAM:541008605184 + RAM:541008576512 CPU:AMD Opteron(tm) Processor 6378 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254319408 + disk:249254028 datadisk:39428 requests_cpu:4 requests_memory:16Gi + worker 0 + RAM:1081966526464 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker28 + disk:440540068 + worker 1 + RAM:1081965555712 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:585609424 + worker 2 + RAM:540587499520 + CPU:AMD EPYC 7502 32-Core Processor + Cores:128 + host:5.15.0-126-generic + node:cl-worker22 + disk:124275600 YugabyteDB-1-1-1024-2 uses docker image postgres:15.0 - RAM:541008605184 + RAM:541008576512 CPU:AMD Opteron(tm) Processor 6378 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254319580 + disk:249254032 datadisk:39428 requests_cpu:4 requests_memory:16Gi + worker 0 + RAM:1081966526464 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker28 + disk:441361868 + worker 1 + RAM:1081965555712 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:586458512 + worker 2 + RAM:540587499520 + CPU:AMD EPYC 7502 32-Core Processor + Cores:128 + host:5.15.0-126-generic + node:cl-worker22 + disk:125118168 ### Execution experiment_run terminals target pod_count time Throughput (requests/second) Latency Distribution.95th Percentile Latency (microseconds) Latency Distribution.Average Latency (microseconds) -YugabyteDB-1-1-1024-1 1 16 16384 1 300.0 395.54 100821.0 40433.0 -YugabyteDB-1-1-1024-2 1 16 16384 2 300.0 346.81 112470.0 46113.5 +YugabyteDB-1-1-1024-1 1 16 16384 1 300.0 403.95 96762.0 39594.0 +YugabyteDB-1-1-1024-2 1 16 16384 2 300.0 362.52 111737.0 44178.0 Warehouses: 16 ### Workflow #### Actual -DBMS YugabyteDB-1-1-1024 - Pods [[1, 2]] +DBMS YugabyteDB-1-1-1024 - Pods [[2, 1]] #### Planned DBMS YugabyteDB-1-1-1024 - Pods [[1, 2]] ### Loading time_load terminals pods Imported warehouses [1/h] -YugabyteDB-1-1-1024-1 200.0 1.0 1.0 288.0 -YugabyteDB-1-1-1024-2 200.0 1.0 2.0 288.0 +YugabyteDB-1-1-1024-1 216.0 1.0 1.0 266.666667 +YugabyteDB-1-1-1024-2 216.0 1.0 2.0 266.666667 ### Tests TEST passed: Throughput (requests/second) contains no 0 or NaN diff --git a/logs_tests/doc_benchbase_yugabytedb_1_summary.txt b/logs_tests/doc_benchbase_yugabytedb_1_summary.txt index 5af846c55..b39c93892 100644 --- a/logs_tests/doc_benchbase_yugabytedb_1_summary.txt +++ b/logs_tests/doc_benchbase_yugabytedb_1_summary.txt @@ -3,9 +3,9 @@ ### Workload Benchbase Workload SF=16 (warehouses for TPC-C) Type: benchbase - Duration: 1026s - Code: 1730223936 - This includes no queries. Benchbase runs the benchmark + Duration: 1067s + Code: 1734627587 + Benchbase runs the benchmark. This experiment compares run time and resource consumption of Benchbase queries in different DBMS. Benchbase data is generated and loaded using several threads. Benchmark is 'tpcc'. Scaling factor (e.g., number of warehouses) is 16. Benchmarking runs for 5 minutes. Target is based on multiples of '1024'. 
Factors for benchmarking are [16]. @@ -21,45 +21,87 @@ Benchbase Workload SF=16 (warehouses for TPC-C) ### Connections YugabyteDB-1-1-1024-1 uses docker image postgres:15.0 - RAM:541008605184 + RAM:541008576512 CPU:AMD Opteron(tm) Processor 6378 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254319408 + disk:249254028 datadisk:39428 requests_cpu:4 requests_memory:16Gi + worker 0 + RAM:1081966526464 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker28 + disk:440540068 + worker 1 + RAM:1081965555712 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:585609424 + worker 2 + RAM:540587499520 + CPU:AMD EPYC 7502 32-Core Processor + Cores:128 + host:5.15.0-126-generic + node:cl-worker22 + disk:124275600 YugabyteDB-1-1-1024-2 uses docker image postgres:15.0 - RAM:541008605184 + RAM:541008576512 CPU:AMD Opteron(tm) Processor 6378 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254319580 + disk:249254032 datadisk:39428 requests_cpu:4 requests_memory:16Gi + worker 0 + RAM:1081966526464 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker28 + disk:441361868 + worker 1 + RAM:1081965555712 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:586458512 + worker 2 + RAM:540587499520 + CPU:AMD EPYC 7502 32-Core Processor + Cores:128 + host:5.15.0-126-generic + node:cl-worker22 + disk:125118168 ### Execution experiment_run terminals target pod_count time Throughput (requests/second) Latency Distribution.95th Percentile Latency (microseconds) Latency Distribution.Average Latency (microseconds) -YugabyteDB-1-1-1024-1 1 16 16384 1 300.0 395.54 100821.0 40433.0 -YugabyteDB-1-1-1024-2 1 16 16384 2 300.0 346.81 112470.0 46113.5 +YugabyteDB-1-1-1024-1 1 16 16384 1 300.0 403.95 96762.0 39594.0 +YugabyteDB-1-1-1024-2 1 16 16384 2 300.0 362.52 111737.0 44178.0 Warehouses: 16 ### Workflow #### Actual -DBMS YugabyteDB-1-1-1024 - Pods [[1, 2]] +DBMS YugabyteDB-1-1-1024 - Pods [[2, 1]] #### Planned DBMS YugabyteDB-1-1-1024 - Pods [[1, 2]] ### Loading time_load terminals pods Imported warehouses [1/h] -YugabyteDB-1-1-1024-1 200.0 1.0 1.0 288.0 -YugabyteDB-1-1-1024-2 200.0 1.0 2.0 288.0 +YugabyteDB-1-1-1024-1 216.0 1.0 1.0 266.666667 +YugabyteDB-1-1-1024-2 216.0 1.0 2.0 266.666667 ### Tests TEST passed: Throughput (requests/second) contains no 0 or NaN diff --git a/logs_tests/doc_benchbase_yugabytedb_2.log b/logs_tests/doc_benchbase_yugabytedb_2.log index 36fc774a4..4f56527c5 100644 --- a/logs_tests/doc_benchbase_yugabytedb_2.log +++ b/logs_tests/doc_benchbase_yugabytedb_2.log @@ -2,8 +2,8 @@ Data Directory : is running Result Directory : is running Dashboard : is running Message Queue : is running -Experiment : has code 1730226312 -Experiment : starts at 2024-10-29 19:25:12.301464 (4962252.425445554) +Experiment : has code 1734628788 +Experiment : starts at 2024-12-19 18:19:48.112043 (4313589.519176751) Experiment : This experiment compares run time and resource consumption of Benchbase queries in different DBMS. Benchbase data is generated and loaded using several threads. Benchmark is 'tpcc'. Scaling factor (e.g., number of warehouses) is 128. Benchmarking runs for 60 minutes. Target is based on multiples of '1024'. Factors for benchmarking are [16]. 
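The Workflow sections in the YugabyteDB summaries above record pod counts in completion order under "Actual", while "Planned" keeps the configured order, so a run can produce a permutation such as [[2, 1]] versus [[1, 2]]. Whether that still counts as "as planned" depends on whether the comparison respects order; an illustrative check for both readings (not bexhoma's implementation):

```python
def same_pods(actual, planned):
    """True if the same pod counts ran per configuration, regardless of completion order."""
    return all(sorted(a) == sorted(p) for a, p in zip(actual, planned))

print(same_pods([[2, 1]], [[1, 2]]))              # True
print(same_pods([[8, 4, 2, 1]], [[1, 2, 4, 8]]))  # True
print([[2, 1]] == [[1, 2]])                       # False: order-sensitive reading
```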
@@ -21,10 +21,10 @@ done YugabyteDB-1-1-1024 : will start now - waiting 30s - : done YugabyteDB-1-1-1024 : is not loaded yet -YugabyteDB-1-1-1024 : will start loading but not before 2024-10-29 18:27:12 (that is in 60 secs) +YugabyteDB-1-1-1024 : will start loading but not before 2024-12-19 17:21:49 (that is in 60 secs) - waiting 30s - : done YugabyteDB-1-1-1024 : is not loaded yet -YugabyteDB-1-1-1024 : will start loading but not before 2024-10-29 18:27:12 +YugabyteDB-1-1-1024 : will start loading but not before 2024-12-19 17:21:49 done YugabyteDB-1-1-1024 : is not loaded yet YugabyteDB-1-1-1024 : start asynch loading scripts of type loaded @@ -97,23 +97,18 @@ YugabyteDB-1-1-1024 : is loading YugabyteDB-1-1-1024 : is loading - waiting 30s - : done YugabyteDB-1-1-1024 : is loading -- waiting 30s - : done -YugabyteDB-1-1-1024 : is loading -- waiting 30s - : done -YugabyteDB-1-1-1024 : is loading -- waiting 30s - : done -YugabyteDB-1-1-1024 : is loading -- waiting 30s - : done -YugabyteDB-1-1-1024 : is loading done YugabyteDB-1-1-1024 : showing loader times -YugabyteDB-1-1-1024 : loader timespan (first to last [s]) = 1151 +YugabyteDB-1-1-1024 : loader timespan (first to last [s]) = 1033 YugabyteDB-1-1-1024 : benchmarks done 0 of 1. This will be client 1 -YugabyteDB-1-1-1024 : we will change parameters of benchmark as {'PARALLEL': '1', 'SF': '128', 'BENCHBASE_BENCH': 'tpcc', 'BENCHBASE_PROFILE': 'postgres', 'BEXHOMA_DATABASE': 'yugabyte', 'BENCHBASE_TARGET': 16384, 'BENCHBASE_TERMINALS': 64, 'BENCHBASE_TIME': 3600, 'BENCHBASE_ISOLATION': 'TRANSACTION_READ_COMMITTED', 'BEXHOMA_PORT': 5433} +YugabyteDB-1-1-1024 : we will change parameters of benchmark as {'PARALLEL': '1', 'SF': '128', 'BENCHBASE_BENCH': 'tpcc', 'BENCHBASE_PROFILE': 'postgres', 'BEXHOMA_DATABASE': 'yugabyte', 'BENCHBASE_TARGET': 16384, 'BENCHBASE_TERMINALS': 64, 'BENCHBASE_TIME': 3600, 'BENCHBASE_ISOLATION': 'TRANSACTION_READ_COMMITTED', 'BEXHOMA_USER': 'yugabyte', 'BEXHOMA_PASSWORD': '', 'BEXHOMA_PORT': 5433} YugabyteDB-1-1-1024-1 : start benchmarking -YugabyteDB-1-1-1024 : benchmarking results in folder /home/perdelt/benchmarks/1730226312 +YugabyteDB-1-1-1024 : distributed system - get host info for worker yb-tserver-0 +YugabyteDB-1-1-1024 : distributed system - get host info for worker yb-tserver-1 +YugabyteDB-1-1-1024 : distributed system - get host info for worker yb-tserver-2 +YugabyteDB-1-1-1024 : benchmarking results in folder /home/perdelt/benchmarks/1734628788 - waiting 10s - : done -YugabyteDB-1-1-1024 : benchmarking is waiting for job bexhoma-benchmarker-yugabytedb-1-1-1024-1730226312-1-1-dk9sr: found +YugabyteDB-1-1-1024 : benchmarking is waiting for job bexhoma-benchmarker-yugabytedb-1-1-1024-1734628788-1-1-ph9rq: found - waiting 30s - : done YugabyteDB-1-1-1024 : has running benchmarks - waiting 30s - : done @@ -355,16 +350,19 @@ YugabyteDB-1-1-1024 : has running benchmarks done YugabyteDB-1-1-1024 : has running benchmarks YugabyteDB-1-1-1024-1 : showing benchmarker times -YugabyteDB-1-1-1024-1 : benchmarker timespan (start to end single container [s]) = 3621 -YugabyteDB-1-1-1024-1 : benchmarker times (start/end per pod and container) = [(1730227613, 1730231226)] -YugabyteDB-1-1-1024-1 : found and updated times {'benchmarker': [(1730227613, 1730231226)]} +YugabyteDB-1-1-1024-1 : benchmarker timespan (start to end single container [s]) = 3641 +YugabyteDB-1-1-1024-1 : benchmarker times (start/end per pod and container) = [(1734629981, 1734633595)] +YugabyteDB-1-1-1024-1 : found and updated times {'benchmarker': 
[(1734629981, 1734633595)]} done YugabyteDB-1-1-1024 : benchmarks done 0 of 1. This will be client 2 -YugabyteDB-1-1-1024 : we will change parameters of benchmark as {'PARALLEL': '2', 'SF': '128', 'BENCHBASE_BENCH': 'tpcc', 'BENCHBASE_PROFILE': 'postgres', 'BEXHOMA_DATABASE': 'yugabyte', 'BENCHBASE_TARGET': 8192, 'BENCHBASE_TERMINALS': 32, 'BENCHBASE_TIME': 3600, 'BENCHBASE_ISOLATION': 'TRANSACTION_READ_COMMITTED', 'BEXHOMA_PORT': 5433} +YugabyteDB-1-1-1024 : we will change parameters of benchmark as {'PARALLEL': '2', 'SF': '128', 'BENCHBASE_BENCH': 'tpcc', 'BENCHBASE_PROFILE': 'postgres', 'BEXHOMA_DATABASE': 'yugabyte', 'BENCHBASE_TARGET': 8192, 'BENCHBASE_TERMINALS': 32, 'BENCHBASE_TIME': 3600, 'BENCHBASE_ISOLATION': 'TRANSACTION_READ_COMMITTED', 'BEXHOMA_USER': 'yugabyte', 'BEXHOMA_PASSWORD': '', 'BEXHOMA_PORT': 5433} YugabyteDB-1-1-1024-2 : start benchmarking -YugabyteDB-1-1-1024 : benchmarking results in folder /home/perdelt/benchmarks/1730226312 +YugabyteDB-1-1-1024 : distributed system - get host info for worker yb-tserver-0 +YugabyteDB-1-1-1024 : distributed system - get host info for worker yb-tserver-1 +YugabyteDB-1-1-1024 : distributed system - get host info for worker yb-tserver-2 +YugabyteDB-1-1-1024 : benchmarking results in folder /home/perdelt/benchmarks/1734628788 - waiting 10s - : done -YugabyteDB-1-1-1024 : benchmarking is waiting for job bexhoma-benchmarker-yugabytedb-1-1-1024-1730226312-1-2-7h7d6: found +YugabyteDB-1-1-1024 : benchmarking is waiting for job bexhoma-benchmarker-yugabytedb-1-1-1024-1734628788-1-2-b78gk: found - waiting 30s - : done YugabyteDB-1-1-1024 : has running benchmarks - waiting 30s - : done @@ -605,17 +603,22 @@ YugabyteDB-1-1-1024 : has running benchmarks YugabyteDB-1-1-1024 : has running benchmarks done YugabyteDB-1-1-1024 : has running benchmarks +done +YugabyteDB-1-1-1024 : has running benchmarks YugabyteDB-1-1-1024-2 : showing benchmarker times -YugabyteDB-1-1-1024-2 : benchmarker timespan (start to end single container [s]) = 3623 -YugabyteDB-1-1-1024-2 : benchmarker times (start/end per pod and container) = [(1730231269, 1730234878), (1730231269, 1730234880)] -YugabyteDB-1-1-1024-2 : found and updated times {'benchmarker': [(1730231269, 1730234878), (1730231269, 1730234880)]} +YugabyteDB-1-1-1024-2 : benchmarker timespan (start to end single container [s]) = 3675 +YugabyteDB-1-1-1024-2 : benchmarker times (start/end per pod and container) = [(1734633661, 1734637272), (1734633661, 1734637318)] +YugabyteDB-1-1-1024-2 : found and updated times {'benchmarker': [(1734633661, 1734637272), (1734633661, 1734637318)]} done YugabyteDB-1-1-1024 : benchmarks done 0 of 1. 
This will be client 3 -YugabyteDB-1-1-1024 : we will change parameters of benchmark as {'PARALLEL': '4', 'SF': '128', 'BENCHBASE_BENCH': 'tpcc', 'BENCHBASE_PROFILE': 'postgres', 'BEXHOMA_DATABASE': 'yugabyte', 'BENCHBASE_TARGET': 4096, 'BENCHBASE_TERMINALS': 16, 'BENCHBASE_TIME': 3600, 'BENCHBASE_ISOLATION': 'TRANSACTION_READ_COMMITTED', 'BEXHOMA_PORT': 5433} +YugabyteDB-1-1-1024 : we will change parameters of benchmark as {'PARALLEL': '4', 'SF': '128', 'BENCHBASE_BENCH': 'tpcc', 'BENCHBASE_PROFILE': 'postgres', 'BEXHOMA_DATABASE': 'yugabyte', 'BENCHBASE_TARGET': 4096, 'BENCHBASE_TERMINALS': 16, 'BENCHBASE_TIME': 3600, 'BENCHBASE_ISOLATION': 'TRANSACTION_READ_COMMITTED', 'BEXHOMA_USER': 'yugabyte', 'BEXHOMA_PASSWORD': '', 'BEXHOMA_PORT': 5433} YugabyteDB-1-1-1024-3 : start benchmarking -YugabyteDB-1-1-1024 : benchmarking results in folder /home/perdelt/benchmarks/1730226312 +YugabyteDB-1-1-1024 : distributed system - get host info for worker yb-tserver-0 +YugabyteDB-1-1-1024 : distributed system - get host info for worker yb-tserver-1 +YugabyteDB-1-1-1024 : distributed system - get host info for worker yb-tserver-2 +YugabyteDB-1-1-1024 : benchmarking results in folder /home/perdelt/benchmarks/1734628788 - waiting 10s - : done -YugabyteDB-1-1-1024 : benchmarking is waiting for job bexhoma-benchmarker-yugabytedb-1-1-1024-1730226312-1-3-5j5rn: found +YugabyteDB-1-1-1024 : benchmarking is waiting for job bexhoma-benchmarker-yugabytedb-1-1-1024-1734628788-1-3-pcp4c: found - waiting 30s - : done YugabyteDB-1-1-1024 : has running benchmarks - waiting 30s - : done @@ -852,21 +855,28 @@ YugabyteDB-1-1-1024 : has running benchmarks YugabyteDB-1-1-1024 : has running benchmarks - waiting 30s - : done YugabyteDB-1-1-1024 : has running benchmarks +done +YugabyteDB-1-1-1024 : has running benchmarks +done +YugabyteDB-1-1-1024 : has running benchmarks - waiting 30s - : done YugabyteDB-1-1-1024 : has running benchmarks done YugabyteDB-1-1-1024 : has running benchmarks YugabyteDB-1-1-1024-3 : showing benchmarker times -YugabyteDB-1-1-1024-3 : benchmarker timespan (start to end single container [s]) = 3628 -YugabyteDB-1-1-1024-3 : benchmarker times (start/end per pod and container) = [(1730234929, 1730238537), (1730234928, 1730238536), (1730234929, 1730238537), (1730234929, 1730238538)] -YugabyteDB-1-1-1024-3 : found and updated times {'benchmarker': [(1730234929, 1730238537), (1730234928, 1730238536), (1730234929, 1730238537), (1730234929, 1730238538)]} +YugabyteDB-1-1-1024-3 : benchmarker timespan (start to end single container [s]) = 3719 +YugabyteDB-1-1-1024-3 : benchmarker times (start/end per pod and container) = [(1734637375, 1734640991), (1734637375, 1734641019), (1734637375, 1734641073), (1734637375, 1734640998)] +YugabyteDB-1-1-1024-3 : found and updated times {'benchmarker': [(1734637375, 1734640991), (1734637375, 1734641019), (1734637375, 1734641073), (1734637375, 1734640998)]} done YugabyteDB-1-1-1024 : benchmarks done 0 of 1. 
This will be client 4 -YugabyteDB-1-1-1024 : we will change parameters of benchmark as {'PARALLEL': '8', 'SF': '128', 'BENCHBASE_BENCH': 'tpcc', 'BENCHBASE_PROFILE': 'postgres', 'BEXHOMA_DATABASE': 'yugabyte', 'BENCHBASE_TARGET': 2048, 'BENCHBASE_TERMINALS': 8, 'BENCHBASE_TIME': 3600, 'BENCHBASE_ISOLATION': 'TRANSACTION_READ_COMMITTED', 'BEXHOMA_PORT': 5433} +YugabyteDB-1-1-1024 : we will change parameters of benchmark as {'PARALLEL': '8', 'SF': '128', 'BENCHBASE_BENCH': 'tpcc', 'BENCHBASE_PROFILE': 'postgres', 'BEXHOMA_DATABASE': 'yugabyte', 'BENCHBASE_TARGET': 2048, 'BENCHBASE_TERMINALS': 8, 'BENCHBASE_TIME': 3600, 'BENCHBASE_ISOLATION': 'TRANSACTION_READ_COMMITTED', 'BEXHOMA_USER': 'yugabyte', 'BEXHOMA_PASSWORD': '', 'BEXHOMA_PORT': 5433} YugabyteDB-1-1-1024-4 : start benchmarking -YugabyteDB-1-1-1024 : benchmarking results in folder /home/perdelt/benchmarks/1730226312 +YugabyteDB-1-1-1024 : distributed system - get host info for worker yb-tserver-0 +YugabyteDB-1-1-1024 : distributed system - get host info for worker yb-tserver-1 +YugabyteDB-1-1-1024 : distributed system - get host info for worker yb-tserver-2 +YugabyteDB-1-1-1024 : benchmarking results in folder /home/perdelt/benchmarks/1734628788 - waiting 10s - : done -YugabyteDB-1-1-1024 : benchmarking is waiting for job bexhoma-benchmarker-yugabytedb-1-1-1024-1730226312-1-4-4gqxz: found +YugabyteDB-1-1-1024 : benchmarking is waiting for job bexhoma-benchmarker-yugabytedb-1-1-1024-1734628788-1-4-7nwvg: .found - waiting 30s - : done YugabyteDB-1-1-1024 : has running benchmarks - waiting 30s - : done @@ -1107,9 +1117,9 @@ YugabyteDB-1-1-1024 : has running benchmarks YugabyteDB-1-1-1024 : has running benchmarks done YugabyteDB-1-1-1024 : has running benchmarks -done +- waiting 30s - : done YugabyteDB-1-1-1024 : has running benchmarks -done +- waiting 30s - : done YugabyteDB-1-1-1024 : has running benchmarks - waiting 30s - : done YugabyteDB-1-1-1024 : has running benchmarks @@ -1117,36 +1127,20 @@ done YugabyteDB-1-1-1024 : has running benchmarks done YugabyteDB-1-1-1024 : has running benchmarks +- waiting 30s - : done +YugabyteDB-1-1-1024 : has running benchmarks +done +YugabyteDB-1-1-1024 : has running benchmarks YugabyteDB-1-1-1024-4 : showing benchmarker times -YugabyteDB-1-1-1024-4 : benchmarker timespan (start to end single container [s]) = 3791 -YugabyteDB-1-1-1024-4 : benchmarker times (start/end per pod and container) = [(1730238595, 1730242235), (1730238595, 1730242255), (1730238595, 1730242231), (1730238595, 1730242224), (1730238595, 1730242245), (1730238595, 1730242322), (1730238595, 1730242213), (1730238596, 1730242350)] -YugabyteDB-1-1-1024-4 : found and updated times {'benchmarker': [(1730238595, 1730242235), (1730238595, 1730242255), (1730238595, 1730242231), (1730238595, 1730242224), (1730238595, 1730242245), (1730238595, 1730242322), (1730238595, 1730242213), (1730238596, 1730242350)]} +YugabyteDB-1-1-1024-4 : benchmarker timespan (start to end single container [s]) = 3910 +YugabyteDB-1-1-1024-4 : benchmarker times (start/end per pod and container) = [(1734641136, 1734644797), (1734641136, 1734644818), (1734641136, 1734644926), (1734641136, 1734644953), (1734641136, 1734644953), (1734641137, 1734645005), (1734641136, 1734644927), (1734641137, 1734644932)] +YugabyteDB-1-1-1024-4 : found and updated times {'benchmarker': [(1734641136, 1734644797), (1734641136, 1734644818), (1734641136, 1734644926), (1734641136, 1734644953), (1734641136, 1734644953), (1734641137, 1734645005), (1734641136, 1734644927), (1734641137, 
1734644932)]} done YugabyteDB-1-1-1024 : can be stopped -Experiment ends at 2024-10-29 23:53:30.141071 (4978350.265050158): 16097.839604604058s total +Experiment ends at 2024-12-19 22:51:10.118243 (4329871.525375789): 16282.006199037656s total Experiment : downloading partial results Experiment : uploading full results Experiment : uploading workload file -Benchmarking connection ... Latency Distribution.Average Latency (microseconds) -connection_pod ... -YugabyteDB-1-1-1024-1-1 YugabyteDB-1-1-1024-1 ... 136271 -YugabyteDB-1-1-1024-3-1 YugabyteDB-1-1-1024-3 ... 151569 -YugabyteDB-1-1-1024-2-1 YugabyteDB-1-1-1024-2 ... 142319 -YugabyteDB-1-1-1024-4-1 YugabyteDB-1-1-1024-4 ... 240528 -YugabyteDB-1-1-1024-4-2 YugabyteDB-1-1-1024-4 ... 251836 -YugabyteDB-1-1-1024-4-3 YugabyteDB-1-1-1024-4 ... 295401 -YugabyteDB-1-1-1024-4-4 YugabyteDB-1-1-1024-4 ... 246889 -YugabyteDB-1-1-1024-3-2 YugabyteDB-1-1-1024-3 ... 164894 -YugabyteDB-1-1-1024-4-5 YugabyteDB-1-1-1024-4 ... 225020 -YugabyteDB-1-1-1024-3-3 YugabyteDB-1-1-1024-3 ... 157392 -YugabyteDB-1-1-1024-4-6 YugabyteDB-1-1-1024-4 ... 272495 -YugabyteDB-1-1-1024-4-7 YugabyteDB-1-1-1024-4 ... 268149 -YugabyteDB-1-1-1024-2-2 YugabyteDB-1-1-1024-2 ... 141678 -YugabyteDB-1-1-1024-4-8 YugabyteDB-1-1-1024-4 ... 268839 -YugabyteDB-1-1-1024-3-4 YugabyteDB-1-1-1024-3 ... 162663 - -[15 rows x 36 columns] -Workflow {'YugabyteDB-1-1-1024': [[1, 4, 2, 8]]} Result workflow not complete ## Show Summary @@ -1154,9 +1148,9 @@ Result workflow not complete ### Workload Benchbase Workload SF=128 (warehouses for TPC-C) Type: benchbase - Duration: 16098s - Code: 1730226312 - This includes no queries. Benchbase runs the benchmark + Duration: 16283s + Code: 1734628788 + Benchbase runs the benchmark. This experiment compares run time and resource consumption of Benchbase queries in different DBMS. Benchbase data is generated and loaded using several threads. Benchmark is 'tpcc'. Scaling factor (e.g., number of warehouses) is 128. Benchmarking runs for 60 minutes. Target is based on multiples of '1024'. Factors for benchmarking are [16]. 
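With eight parallel benchmarker pods, the per-pod (start, end) epochs reported for client 4 earlier in this log drift apart noticeably; the spread of the end times shows how far the slowest pod lags the fastest:

```python
times = [(1734641136, 1734644797), (1734641136, 1734644818),
         (1734641136, 1734644926), (1734641136, 1734644953),
         (1734641136, 1734644953), (1734641137, 1734645005),
         (1734641136, 1734644927), (1734641137, 1734644932)]
ends = [end for _, end in times]
print(max(ends) - min(ends))  # 208 seconds between fastest and slowest pod
```

This straggler effect is one reason the reported timespan (3910 s) exceeds BENCHBASE_TIME (3600 s).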
@@ -1172,69 +1166,153 @@ Benchbase Workload SF=128 (warehouses for TPC-C) ### Connections YugabyteDB-1-1-1024-1 uses docker image postgres:15.0 - RAM:541008605184 + RAM:541008576512 CPU:AMD Opteron(tm) Processor 6378 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254319580 + disk:249254048 datadisk:39428 requests_cpu:4 requests_memory:16Gi + worker 0 + RAM:1081966526464 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker28 + disk:458849332 + worker 1 + RAM:1081965555712 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:603879336 + worker 2 + RAM:540587499520 + CPU:AMD EPYC 7502 32-Core Processor + Cores:128 + host:5.15.0-126-generic + node:cl-worker22 + disk:143144816 YugabyteDB-1-1-1024-2 uses docker image postgres:15.0 - RAM:541008605184 + RAM:541008576512 CPU:AMD Opteron(tm) Processor 6378 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254319748 + disk:249254240 datadisk:39428 requests_cpu:4 requests_memory:16Gi + worker 0 + RAM:1081966526464 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker28 + disk:450176968 + worker 1 + RAM:1081965555712 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:595302840 + worker 2 + RAM:540587499520 + CPU:AMD EPYC 7502 32-Core Processor + Cores:128 + host:5.15.0-126-generic + node:cl-worker22 + disk:133934964 YugabyteDB-1-1-1024-3 uses docker image postgres:15.0 - RAM:541008605184 + RAM:541008576512 CPU:AMD Opteron(tm) Processor 6378 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254319920 + disk:249254432 datadisk:39428 requests_cpu:4 requests_memory:16Gi + worker 0 + RAM:1081966526464 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker28 + disk:450484892 + worker 1 + RAM:1081965555712 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:595477572 + worker 2 + RAM:540587499520 + CPU:AMD EPYC 7502 32-Core Processor + Cores:128 + host:5.15.0-126-generic + node:cl-worker22 + disk:134049872 YugabyteDB-1-1-1024-4 uses docker image postgres:15.0 - RAM:541008605184 + RAM:541008576512 CPU:AMD Opteron(tm) Processor 6378 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254320088 + disk:249254644 datadisk:39428 requests_cpu:4 requests_memory:16Gi + worker 0 + RAM:1081966526464 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker28 + disk:456563680 + worker 1 + RAM:1081965555712 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:640944212 + worker 2 + RAM:540587499520 + CPU:AMD EPYC 7502 32-Core Processor + Cores:128 + host:5.15.0-126-generic + node:cl-worker22 + disk:134098940 ### Execution experiment_run terminals target pod_count time Throughput (requests/second) Latency Distribution.95th Percentile Latency (microseconds) Latency Distribution.Average Latency (microseconds) -YugabyteDB-1-1-1024-1 1 64 16384 1 3600.0 469.61 327056.0 136271.00 -YugabyteDB-1-1-1024-2 1 64 16384 2 3600.0 450.66 357886.0 141998.50 -YugabyteDB-1-1-1024-3 1 64 16384 4 3600.0 402.57 409184.0 159129.50 -YugabyteDB-1-1-1024-4 1 64 16384 8 3600.0 247.49 896527.0 258644.62 +YugabyteDB-1-1-1024-1 1 64 16384 1 3600.0 482.34 346967.0 132675.00 +YugabyteDB-1-1-1024-2 1 64 16384 2 3600.0 
388.47 414796.0 164642.00 +YugabyteDB-1-1-1024-3 1 64 16384 4 3600.0 376.70 430011.0 169282.00 +YugabyteDB-1-1-1024-4 1 64 16384 8 3600.0 258.70 767035.0 244458.62 Warehouses: 128 ### Workflow #### Actual -DBMS YugabyteDB-1-1-1024 - Pods [[1, 4, 2, 8]] +DBMS YugabyteDB-1-1-1024 - Pods [[8, 4, 2, 1]] #### Planned DBMS YugabyteDB-1-1-1024 - Pods [[1, 2, 4, 8]] ### Loading time_load terminals pods Imported warehouses [1/h] -YugabyteDB-1-1-1024-1 1151.0 1.0 1.0 400.347524 -YugabyteDB-1-1-1024-2 1151.0 1.0 2.0 400.347524 -YugabyteDB-1-1-1024-3 1151.0 1.0 4.0 400.347524 -YugabyteDB-1-1-1024-4 1151.0 1.0 8.0 400.347524 +YugabyteDB-1-1-1024-1 1033.0 1.0 1.0 446.07938 +YugabyteDB-1-1-1024-2 1033.0 1.0 2.0 446.07938 +YugabyteDB-1-1-1024-3 1033.0 1.0 4.0 446.07938 +YugabyteDB-1-1-1024-4 1033.0 1.0 8.0 446.07938 ### Tests TEST passed: Throughput (requests/second) contains no 0 or NaN diff --git a/logs_tests/doc_benchbase_yugabytedb_2_summary.txt b/logs_tests/doc_benchbase_yugabytedb_2_summary.txt index 87e66cdc1..a9f73ab0f 100644 --- a/logs_tests/doc_benchbase_yugabytedb_2_summary.txt +++ b/logs_tests/doc_benchbase_yugabytedb_2_summary.txt @@ -3,9 +3,9 @@ ### Workload Benchbase Workload SF=128 (warehouses for TPC-C) Type: benchbase - Duration: 16098s - Code: 1730226312 - This includes no queries. Benchbase runs the benchmark + Duration: 16283s + Code: 1734628788 + Benchbase runs the benchmark. This experiment compares run time and resource consumption of Benchbase queries in different DBMS. Benchbase data is generated and loaded using several threads. Benchmark is 'tpcc'. Scaling factor (e.g., number of warehouses) is 128. Benchmarking runs for 60 minutes. Target is based on multiples of '1024'. Factors for benchmarking are [16]. @@ -21,69 +21,153 @@ Benchbase Workload SF=128 (warehouses for TPC-C) ### Connections YugabyteDB-1-1-1024-1 uses docker image postgres:15.0 - RAM:541008605184 + RAM:541008576512 CPU:AMD Opteron(tm) Processor 6378 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254319580 + disk:249254048 datadisk:39428 requests_cpu:4 requests_memory:16Gi + worker 0 + RAM:1081966526464 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker28 + disk:458849332 + worker 1 + RAM:1081965555712 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:603879336 + worker 2 + RAM:540587499520 + CPU:AMD EPYC 7502 32-Core Processor + Cores:128 + host:5.15.0-126-generic + node:cl-worker22 + disk:143144816 YugabyteDB-1-1-1024-2 uses docker image postgres:15.0 - RAM:541008605184 + RAM:541008576512 CPU:AMD Opteron(tm) Processor 6378 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254319748 + disk:249254240 datadisk:39428 requests_cpu:4 requests_memory:16Gi + worker 0 + RAM:1081966526464 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker28 + disk:450176968 + worker 1 + RAM:1081965555712 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:595302840 + worker 2 + RAM:540587499520 + CPU:AMD EPYC 7502 32-Core Processor + Cores:128 + host:5.15.0-126-generic + node:cl-worker22 + disk:133934964 YugabyteDB-1-1-1024-3 uses docker image postgres:15.0 - RAM:541008605184 + RAM:541008576512 CPU:AMD Opteron(tm) Processor 6378 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254319920 + disk:249254432 datadisk:39428 requests_cpu:4 
requests_memory:16Gi + worker 0 + RAM:1081966526464 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker28 + disk:450484892 + worker 1 + RAM:1081965555712 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:595477572 + worker 2 + RAM:540587499520 + CPU:AMD EPYC 7502 32-Core Processor + Cores:128 + host:5.15.0-126-generic + node:cl-worker22 + disk:134049872 YugabyteDB-1-1-1024-4 uses docker image postgres:15.0 - RAM:541008605184 + RAM:541008576512 CPU:AMD Opteron(tm) Processor 6378 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254320088 + disk:249254644 datadisk:39428 requests_cpu:4 requests_memory:16Gi + worker 0 + RAM:1081966526464 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker28 + disk:456563680 + worker 1 + RAM:1081965555712 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:640944212 + worker 2 + RAM:540587499520 + CPU:AMD EPYC 7502 32-Core Processor + Cores:128 + host:5.15.0-126-generic + node:cl-worker22 + disk:134098940 ### Execution experiment_run terminals target pod_count time Throughput (requests/second) Latency Distribution.95th Percentile Latency (microseconds) Latency Distribution.Average Latency (microseconds) -YugabyteDB-1-1-1024-1 1 64 16384 1 3600.0 469.61 327056.0 136271.00 -YugabyteDB-1-1-1024-2 1 64 16384 2 3600.0 450.66 357886.0 141998.50 -YugabyteDB-1-1-1024-3 1 64 16384 4 3600.0 402.57 409184.0 159129.50 -YugabyteDB-1-1-1024-4 1 64 16384 8 3600.0 247.49 896527.0 258644.62 +YugabyteDB-1-1-1024-1 1 64 16384 1 3600.0 482.34 346967.0 132675.00 +YugabyteDB-1-1-1024-2 1 64 16384 2 3600.0 388.47 414796.0 164642.00 +YugabyteDB-1-1-1024-3 1 64 16384 4 3600.0 376.70 430011.0 169282.00 +YugabyteDB-1-1-1024-4 1 64 16384 8 3600.0 258.70 767035.0 244458.62 Warehouses: 128 ### Workflow #### Actual -DBMS YugabyteDB-1-1-1024 - Pods [[1, 4, 2, 8]] +DBMS YugabyteDB-1-1-1024 - Pods [[8, 4, 2, 1]] #### Planned DBMS YugabyteDB-1-1-1024 - Pods [[1, 2, 4, 8]] ### Loading time_load terminals pods Imported warehouses [1/h] -YugabyteDB-1-1-1024-1 1151.0 1.0 1.0 400.347524 -YugabyteDB-1-1-1024-2 1151.0 1.0 2.0 400.347524 -YugabyteDB-1-1-1024-3 1151.0 1.0 4.0 400.347524 -YugabyteDB-1-1-1024-4 1151.0 1.0 8.0 400.347524 +YugabyteDB-1-1-1024-1 1033.0 1.0 1.0 446.07938 +YugabyteDB-1-1-1024-2 1033.0 1.0 2.0 446.07938 +YugabyteDB-1-1-1024-3 1033.0 1.0 4.0 446.07938 +YugabyteDB-1-1-1024-4 1033.0 1.0 8.0 446.07938 ### Tests TEST passed: Throughput (requests/second) contains no 0 or NaN diff --git a/logs_tests/doc_tpcds_testcase_monetdb_storage_summary.txt b/logs_tests/doc_tpcds_testcase_monetdb_storage_summary.txt index 02c562aa8..28932e52d 100644 --- a/logs_tests/doc_tpcds_testcase_monetdb_storage_summary.txt +++ b/logs_tests/doc_tpcds_testcase_monetdb_storage_summary.txt @@ -3,8 +3,8 @@ ### Workload TPC-DS Queries SF=1 Type: tpcds - Duration: 844s - Code: 1731437167 + Duration: 562s + Code: 1731423235 This includes the reading queries of TPC-DS. This experiment compares run time and resource consumption of TPC-DS queries in different DBMS. TPC-DS (SF=1) data is loaded and benchmark is executed. 
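The "Geometric Mean of Medians of Timer Run" reported further down aggregates per-query latencies like those in the table that follows: take the median run time per query, then the geometric mean across all queries. A sketch of that aggregation (illustrative; the authoritative computation lives in the evaluation code):

```python
from statistics import median
import math

def geo_mean_of_medians(per_query_runs_ms):
    """Median per query, then geometric mean across queries, converted ms -> s."""
    medians = [median(runs) for runs in per_query_runs_ms]
    return math.exp(sum(math.log(m) for m in medians) / len(medians)) / 1000.0
```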
@@ -30,10 +30,10 @@ MonetDB-BHT-8-1-1-1 uses docker image monetdb/monetdb:Aug2024 Cores:64 host:5.15.0-116-generic node:cl-worker11 - disk:352154840 - datadisk:5732867 + disk:352155712 + datadisk:3933059 volume_size:30G - volume_used:4.7G + volume_used:3.8G requests_cpu:4 requests_memory:16Gi MonetDB-BHT-8-2-1-1 uses docker image monetdb/monetdb:Aug2024 @@ -42,10 +42,10 @@ MonetDB-BHT-8-2-1-1 uses docker image monetdb/monetdb:Aug2024 Cores:64 host:5.15.0-116-generic node:cl-worker11 - disk:352154772 - datadisk:5815774 + disk:352154400 + datadisk:3933061 volume_size:30G - volume_used:5.6G + volume_used:3.8G requests_cpu:4 requests_memory:16Gi @@ -57,128 +57,128 @@ No warnings ### Latency of Timer Execution [ms] DBMS MonetDB-BHT-8-1-1-1 MonetDB-BHT-8-2-1-1 -TPC-DS Q1 45.53 453.10 -TPC-DS Q2 170.89 620.87 -TPC-DS Q3 37.73 570.98 -TPC-DS Q4 1552.11 2934.44 -TPC-DS Q5 361.34 3436.13 -TPC-DS Q6 89.06 448.07 -TPC-DS Q7 370.74 1340.50 -TPC-DS Q8 66.85 233.23 -TPC-DS Q9 72.81 417.78 -TPC-DS Q10 204.33 4625.55 -TPC-DS Q11 573.71 653.79 -TPC-DS Q12 24.65 332.23 -TPC-DS Q13 156.36 449.52 -TPC-DS Q14a+b 2545.50 2674.22 -TPC-DS Q15 26.60 182.66 -TPC-DS Q16 918.26 664.30 -TPC-DS Q17 220.65 284.05 -TPC-DS Q18 315.22 435.14 -TPC-DS Q19 40.17 401.33 -TPC-DS Q20 29.16 30.03 -TPC-DS Q21 735.74 2621.45 -TPC-DS Q22 1112.27 1114.57 -TPC-DS Q23a+b 1915.40 2232.28 -TPC-DS Q24a+b 613.63 493.73 -TPC-DS Q25 107.07 109.26 -TPC-DS Q26 226.25 159.58 -TPC-DS Q27 485.47 353.71 -TPC-DS Q28 61.97 71.72 -TPC-DS Q29 84.91 102.23 -TPC-DS Q30 110.30 71.43 -TPC-DS Q31 191.81 420.74 -TPC-DS Q32 16.46 22.47 -TPC-DS Q33 143.38 120.87 -TPC-DS Q34 34.38 91.15 -TPC-DS Q35 144.53 546.07 -TPC-DS Q36 89.90 79.81 -TPC-DS Q37 47.62 109.54 -TPC-DS Q38 174.11 206.20 -TPC-DS Q39a+b 1398.94 1461.25 -TPC-DS Q40 200.19 958.15 -TPC-DS Q41 8.03 11.31 -TPC-DS Q42 19.14 24.42 -TPC-DS Q43 60.30 74.74 -TPC-DS Q44 32.24 132.95 -TPC-DS Q45 25.40 26.36 -TPC-DS Q46 114.02 243.34 -TPC-DS Q47 242.65 237.49 -TPC-DS Q48 103.56 94.71 -TPC-DS Q49 107.98 505.19 -TPC-DS Q50 228.48 160.77 -TPC-DS Q51 577.92 607.93 -TPC-DS Q52 24.20 23.36 -TPC-DS Q53 30.41 29.93 -TPC-DS Q54 28.64 115.20 -TPC-DS Q55 16.29 21.27 -TPC-DS Q56 52.27 147.91 -TPC-DS Q57 83.70 150.09 -TPC-DS Q58 46.05 64.11 -TPC-DS Q59 90.08 129.81 -TPC-DS Q60 26.80 34.17 -TPC-DS Q61 39.92 57.84 -TPC-DS Q62 32.47 75.63 -TPC-DS Q63 31.45 29.39 -TPC-DS Q64 427.17 858.60 -TPC-DS Q65 99.21 125.34 -TPC-DS Q66 269.13 864.91 -TPC-DS Q67 632.07 646.41 -TPC-DS Q68 37.59 48.04 -TPC-DS Q69 95.86 22.80 -TPC-DS Q70 618.59 144.58 -TPC-DS Q71 32.00 34.97 -TPC-DS Q72 689.63 751.43 -TPC-DS Q73 25.73 26.13 -TPC-DS Q74 187.43 194.98 -TPC-DS Q75 680.70 707.70 -TPC-DS Q76 86.76 790.43 -TPC-DS Q77 95.93 159.49 -TPC-DS Q78 769.74 808.69 -TPC-DS Q79 64.20 71.45 -TPC-DS Q80 419.09 439.57 -TPC-DS Q81 155.22 188.96 -TPC-DS Q82 182.37 223.20 -TPC-DS Q83 55.29 92.94 -TPC-DS Q84 38.24 134.47 -TPC-DS Q85 39.50 180.56 -TPC-DS Q86 32.13 32.57 -TPC-DS Q87 264.97 284.99 -TPC-DS Q88 111.57 524.08 -TPC-DS Q89 37.49 35.67 -TPC-DS Q90 17.93 16.69 -TPC-DS Q91 113.53 230.38 -TPC-DS Q92 13.38 13.15 -TPC-DS Q93 99.23 89.90 -TPC-DS Q94 20.40 782.21 -TPC-DS Q95 125.28 91.60 -TPC-DS Q96 14.64 17.66 -TPC-DS Q97 233.32 211.95 -TPC-DS Q98 43.63 43.64 -TPC-DS Q99 75.10 164.92 +TPC-DS Q1 486.60 446.46 +TPC-DS Q2 735.75 496.80 +TPC-DS Q3 1769.81 659.53 +TPC-DS Q4 3358.36 2602.90 +TPC-DS Q5 3171.96 2093.09 +TPC-DS Q6 504.90 308.74 +TPC-DS Q7 1879.38 1008.73 +TPC-DS Q8 659.66 212.56 +TPC-DS Q9 286.20 205.52 +TPC-DS Q10 5364.11 4483.40 +TPC-DS Q11 697.10 
673.49
+TPC-DS Q12 356.90 273.96
+TPC-DS Q13 202.20 170.23
+TPC-DS Q14a+b 2933.79 2743.93
+TPC-DS Q15 31.66 27.08
+TPC-DS Q16 1167.22 745.83
+TPC-DS Q17 338.52 228.60
+TPC-DS Q18 381.40 277.50
+TPC-DS Q19 114.67 78.70
+TPC-DS Q20 30.87 30.24
+TPC-DS Q21 3169.38 1899.49
+TPC-DS Q22 2379.59 1619.44
+TPC-DS Q23a+b 2909.42 2865.39
+TPC-DS Q24a+b 776.45 451.26
+TPC-DS Q25 143.88 114.63
+TPC-DS Q26 62.46 45.48
+TPC-DS Q27 125.82 110.59
+TPC-DS Q28 72.60 68.92
+TPC-DS Q29 110.84 110.92
+TPC-DS Q30 172.32 121.37
+TPC-DS Q31 467.73 275.80
+TPC-DS Q32 18.83 19.72
+TPC-DS Q33 298.00 124.45
+TPC-DS Q34 742.48 335.74
+TPC-DS Q35 92.56 95.23
+TPC-DS Q36 91.34 92.79
+TPC-DS Q37 72.14 82.98
+TPC-DS Q38 197.99 209.99
+TPC-DS Q39a+b 2040.00 1667.93
+TPC-DS Q40 792.22 181.19
+TPC-DS Q41 20.45 8.41
+TPC-DS Q42 26.41 24.63
+TPC-DS Q43 61.78 74.05
+TPC-DS Q44 710.50 579.30
+TPC-DS Q45 31.67 28.30
+TPC-DS Q46 280.54 97.61
+TPC-DS Q47 268.06 259.01
+TPC-DS Q48 108.05 112.13
+TPC-DS Q49 564.18 281.98
+TPC-DS Q50 220.37 127.77
+TPC-DS Q51 632.36 592.09
+TPC-DS Q52 27.67 21.20
+TPC-DS Q53 31.81 26.92
+TPC-DS Q54 28.68 23.02
+TPC-DS Q55 16.55 17.33
+TPC-DS Q56 72.80 52.79
+TPC-DS Q57 137.53 88.66
+TPC-DS Q58 64.60 44.84
+TPC-DS Q59 126.84 100.64
+TPC-DS Q60 25.41 26.17
+TPC-DS Q61 87.56 44.10
+TPC-DS Q62 469.55 129.58
+TPC-DS Q63 26.61 37.29
+TPC-DS Q64 887.89 377.60
+TPC-DS Q65 141.28 99.52
+TPC-DS Q66 494.04 680.16
+TPC-DS Q67 701.38 660.23
+TPC-DS Q68 221.30 40.34
+TPC-DS Q69 30.22 21.72
+TPC-DS Q70 449.74 210.09
+TPC-DS Q71 123.15 68.38
+TPC-DS Q72 1045.01 395.32
+TPC-DS Q73 27.43 26.80
+TPC-DS Q74 182.78 193.39
+TPC-DS Q75 798.84 742.67
+TPC-DS Q76 520.86 470.76
+TPC-DS Q77 102.27 89.61
+TPC-DS Q78 800.57 786.99
+TPC-DS Q79 80.46 281.73
+TPC-DS Q80 427.69 464.60
+TPC-DS Q81 155.50 61.77
+TPC-DS Q82 104.57 199.35
+TPC-DS Q83 106.50 51.99
+TPC-DS Q84 333.50 50.06
+TPC-DS Q85 126.02 87.67
+TPC-DS Q86 29.07 37.32
+TPC-DS Q87 277.78 274.00
+TPC-DS Q88 440.20 142.21
+TPC-DS Q89 33.32 39.56
+TPC-DS Q90 29.34 18.02
+TPC-DS Q91 177.66 60.11
+TPC-DS Q92 13.51 14.14
+TPC-DS Q93 100.60 93.75
+TPC-DS Q94 411.45 35.92
+TPC-DS Q95 141.73 151.27
+TPC-DS Q96 13.54 16.58
+TPC-DS Q97 196.04 215.72
+TPC-DS Q98 40.15 43.13
+TPC-DS Q99 61.77 106.40
 ### Loading [s]
 timeGenerate timeIngesting timeSchema timeIndex timeLoad
-MonetDB-BHT-8-1-1-1 0.0 123.0 5.0 76.0 211.0
-MonetDB-BHT-8-2-1-1 0.0 123.0 5.0 76.0 211.0
+MonetDB-BHT-8-1-1-1 1.0 125.0 7.0 100.0 243.0
+MonetDB-BHT-8-2-1-1 1.0 125.0 7.0 100.0 243.0
 ### Geometric Mean of Medians of Timer Run [s]
 Geo Times [s]
 DBMS
-MonetDB-BHT-8-1-1-1 0.11
-MonetDB-BHT-8-2-1-1 0.20
+MonetDB-BHT-8-1-1-1 0.21
+MonetDB-BHT-8-2-1-1 0.15
 ### Power@Size
 Power@Size [~Q/h]
 DBMS
-MonetDB-BHT-8-1-1-1 33800.04
-MonetDB-BHT-8-2-1-1 19415.84
+MonetDB-BHT-8-1-1-1 18236.61
+MonetDB-BHT-8-2-1-1 25084.76
 ### Throughput@Size
 time [s] count SF Throughput@Size [~GB/h]
 DBMS SF num_experiment num_client
-MonetDB-BHT-8-1-1 1 1 1 45 1 1 1760.0
-MonetDB-BHT-8-2-1 1 2 1 115 1 1 688.7
+MonetDB-BHT-8-1-1 1 1 1 83 1 1 954.22
+MonetDB-BHT-8-2-1 1 2 1 68 1 1 1164.71
 ### Workflow
diff --git a/logs_tests/doc_tpch_monetdb_3.log b/logs_tests/doc_tpch_monetdb_3.log
index 1c5199b0b..12a5cc57c 100644
--- a/logs_tests/doc_tpch_monetdb_3.log
+++ b/logs_tests/doc_tpch_monetdb_3.log
@@ -1,13 +1,15 @@
+nohup: ignoring input
 Data Directory : is running
 Result Directory : is running
 Dashboard : is running
 Message Queue : is running
-Experiment : has code 1728344200
-Experiment : starts at 2024-10-08 01:36:40.264374 (3080140.388347578)
+Experiment : has code 1734693433
+Experiment : starts at 2024-12-20 12:17:13.412070 (4378234.819201565)
 Experiment : This experiment compares run time and resource consumption of TPC-H queries in different DBMS.
 TPC-H (SF=100) data is loaded and benchmark is executed.
 Query ordering is Q1 - Q22.
 All instances use the same query parameters.
+Timeout per query is 1200.
 Import sets indexes and constraints after loading and recomputes statistics.
 System metrics are monitored by a cluster-wide installation.
 Benchmark is limited to DBMS ['MonetDB'].
@@ -18,205 +20,583 @@ SUT is fixed to cl-worker11.
 Database is persisted to disk of type shared and size 300Gi.
 Loading is tested with [8] threads, split into [8] pods.
 Benchmarking is tested with [1] threads, split into [1] pods.
-Benchmarking is run as [1, 1, 5, 5] times the number of benchmarking pods.
+Benchmarking is run as [1, 1, 3] times the number of benchmarking pods.
 Experiment is run once.
 Cluster monitoring : is running
+done
+MonetDB-BHT-8 : will start now
 - waiting 30s - : done
-MonetDB-BHT-8 : has to wait - 1 running and 0 pending pods: max is 1 pods per cluster
+MonetDB-BHT-8 : is not loaded yet
+MonetDB-BHT-8 : will start loading but not before 2024-12-20 11:19:14 (that is in 60 secs)
 - waiting 30s - : done
-MonetDB-BHT-8 : has to wait - 1 running and 0 pending pods: max is 1 pods per cluster
+MonetDB-BHT-8 : is not loaded yet
+MonetDB-BHT-8 : will start loading but not before 2024-12-20 11:19:14
+done
+MonetDB-BHT-8 : is not loaded yet
+MonetDB-BHT-8 : start asynch loading scripts of type loaded
+MonetDB-BHT-8 : is loading
 - waiting 30s - : done
-MonetDB-BHT-8 : has to wait - 1 running and 0 pending pods: max is 1 pods per cluster
+MonetDB-BHT-8 : is loading
[... a few more identical "- waiting 30s" cycles omitted ("has to wait" lines removed, "is loading" lines added) ...]
-done
-MonetDB-BHT-8 : will start now
-MonetDB-BHT-8 : storage exists bexhoma-storage-monetdb-tpch-100
-MonetDB-BHT-8 : loading is set to finished
+MonetDB-BHT-8 : is loading
 - waiting 30s - : done
-MonetDB-BHT-8 : will start benchmarking but not before 2024-10-07 23:41:41 (that is in 60 secs)
+MonetDB-BHT-8 : is loading
 - waiting 30s - : done
-MonetDB-BHT-8 : will start benchmarking but not before 2024-10-07 23:41:41
-done
-MonetDB-BHT-8 : benchmarks done 0 of 1. This will be client 1
-MonetDB-BHT-8-1 : start benchmarking
-MonetDB-BHT-8 : benchmarking results in folder /home/perdelt/benchmarks/1728344200
-- waiting 10s - : done
-MonetDB-BHT-8 : benchmarking is waiting for job bexhoma-benchmarker-monetdb-bht-8-1728344200-1-1-nqxzb: found
+MonetDB-BHT-8 : is loading
 - waiting 30s - : done
-MonetDB-BHT-8 : has running benchmarks
+MonetDB-BHT-8 : is loading
[... dozens of further cycles omitted ("has running benchmarks" lines removed, "is loading" lines added, separated by "- waiting 30s - : done") ...]
+MonetDB-BHT-8 : is loading
+- waiting 30s - : done
[... dozens of repeated "+MonetDB-BHT-8 : is loading / +- waiting 30s - : done" cycles omitted ...]
 done
-MonetDB-BHT-8 : has running benchmarks
-MonetDB-BHT-8-1 : showing benchmarker times
-MonetDB-BHT-8-1 : benchmarker timespan (start to end single container [s]) = 1457
-MonetDB-BHT-8-1 : benchmarker times (start/end per pod and container) = [(1728344513, 1728345947)]
-MonetDB-BHT-8-1 : found and updated times {'benchmarker': [(1728344513, 1728345947)]}
-MonetDB-BHT-8-1 : collecting execution metrics of SUT
-MonetDB-BHT-8-1 : collecting metrics of benchmarker
+MonetDB-BHT-8 : is loading
 done
-MonetDB-BHT-8 : benchmarks done 0 of 1. This will be client 2
-MonetDB-BHT-8-2 : start benchmarking
-MonetDB-BHT-8 : benchmarking results in folder /home/perdelt/benchmarks/1728344200
-- waiting 10s - : done
-MonetDB-BHT-8 : benchmarking is waiting for job bexhoma-benchmarker-monetdb-bht-8-1728344200-1-2-rnkdm: found
+MonetDB-BHT-8 : is loading
+done
[... a few more "+MonetDB-BHT-8 : is loading / +done" cycles omitted ...]
+MonetDB-BHT-8 : showing loader times
+MonetDB-BHT-8 : generator timespan (first to last [s]) = 955
+MonetDB-BHT-8 : loader timespan (first to last [s]) = 1602
+MonetDB-BHT-8 : total timespan (first to last [s]) = 2573
+MonetDB-BHT-8 : start asynch loading scripts of type indexed
+MonetDB-BHT-8 : is loading
 - waiting 30s - : done
-MonetDB-BHT-8 : has running benchmarks
+MonetDB-BHT-8 : is loading
[... about twenty more such cycles omitted ...]
-done
-MonetDB-BHT-8 : has running benchmarks
-MonetDB-BHT-8-2 : showing benchmarker times
-MonetDB-BHT-8-2 : benchmarker timespan (start to end single container [s]) = 764
-MonetDB-BHT-8-2 : benchmarker times (start/end per pod and container) = [(1728346015, 1728346766)]
-MonetDB-BHT-8-2 : found and updated times {'benchmarker': [(1728346015, 1728346766)]}
-MonetDB-BHT-8-2 : collecting execution metrics of SUT
-MonetDB-BHT-8-2 : collecting metrics of benchmarker
+MonetDB-BHT-8 : is loading
+- waiting 30s - : done
[... several hours' worth of repeated "+MonetDB-BHT-8 : is loading / +- waiting 30s - : done" polling cycles omitted; index creation alone took 5671 s, cf. the Loading table below ...]
+MonetDB-BHT-8 : is loading
 done
-MonetDB-BHT-8 : benchmarks done 0 of 1. This will be client 3
-MonetDB-BHT-8-3 : start benchmarking
-MonetDB-BHT-8 : benchmarking results in folder /home/perdelt/benchmarks/1728344200
+MonetDB-BHT-8 : benchmarks done 0 of 1. This will be client 1
+MonetDB-BHT-8-1 : start benchmarking
+Worker pods found: []
+MonetDB-BHT-8 : benchmarking results in folder /home/perdelt/benchmarks/1734693433
 - waiting 10s - : done
-MonetDB-BHT-8 : benchmarking is waiting for job bexhoma-benchmarker-monetdb-bht-8-1728344200-1-3-44jlc: found
+found
+MonetDB-BHT-8-1 : collecting loading metrics of SUT
+MonetDB-BHT-8-1 : example metric {'query': '(sum(max(container_memory_working_set_bytes{pod=~"(.*)monetdb-bht-8-1734693433(.*)", container="dbms"}) by (pod)))/1024/1024', 'title': 'CPU Memory [MiB]'}
+MonetDB-BHT-8-1 : collecting metrics of data generator
+MonetDB-BHT-8-1 : example metric {'query': '(sum(max(container_memory_working_set_bytes{pod=~"(.*)monetdb-bht-8-1734693433(.*)", container="datagenerator"}) by (pod)))/1024/1024', 'title': 'CPU Memory [MiB]'}
+MonetDB-BHT-8-1 : collecting metrics of data injector
+MonetDB-BHT-8-1 : example metric {'query': '(sum(max(container_memory_working_set_bytes{pod=~"(.*)monetdb-bht-8-1734693433(.*)", container="sensor"}) by (pod)))/1024/1024', 'title': 'CPU Memory [MiB]'}
 - waiting 30s - : done
 MonetDB-BHT-8 : has running benchmarks
[... repeated "- waiting 30s - : done / MonetDB-BHT-8 : has running benchmarks" context lines omitted ...]
@@ -257,8 +637,29 @@ MonetDB-BHT-8 : has running benchmarks
 MonetDB-BHT-8 : has running benchmarks
 - waiting 30s - : done
 MonetDB-BHT-8 : has running benchmarks
-- waiting 30s - : done
+done
 MonetDB-BHT-8 : has running benchmarks
+MonetDB-BHT-8-1 : showing benchmarker times
+MonetDB-BHT-8-1 : benchmarker timespan (start to end single container [s]) = 662
+MonetDB-BHT-8-1 : benchmarker times (start/end per pod and container) = [(1734701875, 1734702503)]
+MonetDB-BHT-8-1 : found and updated times {'benchmarker': [(1734701875, 1734702503)]}
+MonetDB-BHT-8-1 : collecting execution metrics of SUT
+MonetDB-BHT-8-1 : example metric {'query': '(sum(max(container_memory_working_set_bytes{pod=~"(.*)monetdb-bht-8-1734693433(.*)", container="dbms"}) by (pod)))/1024/1024', 'title': 'CPU Memory [MiB]'}
+MonetDB-BHT-8-1 : collecting metrics of benchmarker
+MonetDB-BHT-8-1 : example metric {'query': '(sum(max(container_memory_working_set_bytes{pod=~"(.*)monetdb-bht-8-1734693433(.*)", container="dbmsbenchmarker"}) by (pod)))/1024/1024', 'title': 'CPU Memory [MiB]'}
+done
+MonetDB-BHT-8 : benchmarks done 0 of 1. This will be client 2
+MonetDB-BHT-8-2 : start benchmarking
+Worker pods found: []
+MonetDB-BHT-8 : benchmarking results in folder /home/perdelt/benchmarks/1734693433
+- waiting 10s - : done
+found
+MonetDB-BHT-8-2 : collecting loading metrics of SUT
+MonetDB-BHT-8-2 : example metric {'query': '(sum(max(container_memory_working_set_bytes{pod=~"(.*)monetdb-bht-8-1734693433(.*)", container="dbms"}) by (pod)))/1024/1024', 'title': 'CPU Memory [MiB]'}
+MonetDB-BHT-8-2 : collecting metrics of data generator
+MonetDB-BHT-8-2 : example metric {'query': '(sum(max(container_memory_working_set_bytes{pod=~"(.*)monetdb-bht-8-1734693433(.*)", container="datagenerator"}) by (pod)))/1024/1024', 'title': 'CPU Memory [MiB]'}
+MonetDB-BHT-8-2 : collecting metrics of data injector
+MonetDB-BHT-8-2 : example metric {'query': '(sum(max(container_memory_working_set_bytes{pod=~"(.*)monetdb-bht-8-1734693433(.*)", container="sensor"}) by (pod)))/1024/1024', 'title': 'CPU Memory [MiB]'}
 - waiting 30s - : done
 MonetDB-BHT-8 : has running benchmarks
[... repeated polling context lines omitted ...]
@@ -277,24 +678,6 @@ MonetDB-BHT-8 : has running benchmarks
 MonetDB-BHT-8 : has running benchmarks
 - waiting 30s - : done
 MonetDB-BHT-8 : has running benchmarks
-done
-MonetDB-BHT-8 : has running benchmarks
-done
-MonetDB-BHT-8 : has running benchmarks
-done
-MonetDB-BHT-8 : has running benchmarks
-MonetDB-BHT-8-3 : showing benchmarker times
-MonetDB-BHT-8-3 : benchmarker timespan (start to end single container [s]) = 1009
-MonetDB-BHT-8-3 : benchmarker times (start/end per pod and container) = [(1728346836, 1728347782), (1728346836, 1728347760), (1728346837, 1728347795), (1728346837, 1728347796), (1728346836, 1728347790)]
-MonetDB-BHT-8-3 : found and updated times {'benchmarker': [(1728346836, 1728347782), (1728346836, 1728347760), (1728346837, 1728347795), (1728346837, 1728347796), (1728346836, 1728347790)]}
-MonetDB-BHT-8-3 : collecting execution metrics of SUT
-MonetDB-BHT-8-3 : collecting metrics of benchmarker
-done
-MonetDB-BHT-8 : benchmarks done 0 of 1. This will be client 4
-MonetDB-BHT-8-4 : start benchmarking
-MonetDB-BHT-8 : benchmarking results in folder /home/perdelt/benchmarks/1728344200
-- waiting 10s - : done
-MonetDB-BHT-8 : benchmarking is waiting for job bexhoma-benchmarker-monetdb-bht-8-1728344200-1-4-27j5d: found
 - waiting 30s - : done
 MonetDB-BHT-8 : has running benchmarks
[... repeated polling context lines omitted ...]
@@ -313,8 +696,29 @@ MonetDB-BHT-8 : has running benchmarks
 MonetDB-BHT-8 : has running benchmarks
 - waiting 30s - : done
 MonetDB-BHT-8 : has running benchmarks
-- waiting 30s - : done
+done
 MonetDB-BHT-8 : has running benchmarks
+MonetDB-BHT-8-2 : showing benchmarker times
+MonetDB-BHT-8-2 : benchmarker timespan (start to end single container [s]) = 603
+MonetDB-BHT-8-2 : benchmarker times (start/end per pod and container) = [(1734702579, 1734703146)]
+MonetDB-BHT-8-2 : found and updated times {'benchmarker': [(1734702579, 1734703146)]}
+MonetDB-BHT-8-2 : collecting execution metrics of SUT
+MonetDB-BHT-8-2 : example metric {'query': '(sum(max(container_memory_working_set_bytes{pod=~"(.*)monetdb-bht-8-1734693433(.*)", container="dbms"}) by (pod)))/1024/1024', 'title': 'CPU Memory [MiB]'}
+MonetDB-BHT-8-2 : collecting metrics of benchmarker
+MonetDB-BHT-8-2 : example metric {'query': '(sum(max(container_memory_working_set_bytes{pod=~"(.*)monetdb-bht-8-1734693433(.*)", container="dbmsbenchmarker"}) by (pod)))/1024/1024', 'title': 'CPU Memory [MiB]'}
+done
+MonetDB-BHT-8 : benchmarks done 0 of 1. This will be client 3
+MonetDB-BHT-8-3 : start benchmarking
+Worker pods found: []
+MonetDB-BHT-8 : benchmarking results in folder /home/perdelt/benchmarks/1734693433
+- waiting 10s - : done
+found
+MonetDB-BHT-8-3 : collecting loading metrics of SUT
+MonetDB-BHT-8-3 : example metric {'query': '(sum(max(container_memory_working_set_bytes{pod=~"(.*)monetdb-bht-8-1734693433(.*)", container="dbms"}) by (pod)))/1024/1024', 'title': 'CPU Memory [MiB]'}
+MonetDB-BHT-8-3 : collecting metrics of data generator
+MonetDB-BHT-8-3 : example metric {'query': '(sum(max(container_memory_working_set_bytes{pod=~"(.*)monetdb-bht-8-1734693433(.*)", container="datagenerator"}) by (pod)))/1024/1024', 'title': 'CPU Memory [MiB]'}
+MonetDB-BHT-8-3 : collecting metrics of data injector
+MonetDB-BHT-8-3 : example metric {'query': '(sum(max(container_memory_working_set_bytes{pod=~"(.*)monetdb-bht-8-1734693433(.*)", container="sensor"}) by (pod)))/1024/1024', 'title': 'CPU Memory [MiB]'}
 - waiting 30s - : done
 MonetDB-BHT-8 : has running benchmarks
[... repeated polling context lines omitted ...]
@@ -355,14 +759,226 @@ done
 MonetDB-BHT-8 : has running benchmarks
 done
 MonetDB-BHT-8 : has running benchmarks
-MonetDB-BHT-8-4 : showing benchmarker times
-MonetDB-BHT-8-4 : benchmarker timespan (start to end single container [s]) = 919
-MonetDB-BHT-8-4 : benchmarker times (start/end per pod and container) = [(1728347894, 1728348760), (1728347894, 1728348760), (1728347894, 1728348768), (1728347894, 1728348756), (1728347894, 1728348768)]
-MonetDB-BHT-8-4 : found and updated times {'benchmarker': [(1728347894, 1728348760), (1728347894, 1728348760), (1728347894, 1728348768), (1728347894, 1728348756), (1728347894, 1728348768)]}
-MonetDB-BHT-8-4 : collecting execution metrics of SUT
-MonetDB-BHT-8-4 : collecting metrics of benchmarker
+MonetDB-BHT-8-3 : showing benchmarker times
+MonetDB-BHT-8-3 : benchmarker timespan (start to end single container [s]) = 646
+MonetDB-BHT-8-3 : benchmarker times (start/end per pod and container) = [(1734703227, 1734703832), (1734703227, 1734703827), (1734703227, 1734703819)]
+MonetDB-BHT-8-3 : found and updated times {'benchmarker': [(1734703227, 1734703832), (1734703227, 1734703827), (1734703227, 1734703819)]}
+MonetDB-BHT-8-3 : collecting execution metrics of SUT
+MonetDB-BHT-8-3 : example metric {'query': '(sum(max(container_memory_working_set_bytes{pod=~"(.*)monetdb-bht-8-1734693433(.*)", container="dbms"}) by (pod)))/1024/1024', 'title': 'CPU Memory [MiB]'}
+MonetDB-BHT-8-3 : collecting metrics of benchmarker
+MonetDB-BHT-8-3 : example metric {'query': '(sum(max(container_memory_working_set_bytes{pod=~"(.*)monetdb-bht-8-1734693433(.*)", container="dbmsbenchmarker"}) by (pod)))/1024/1024', 'title': 'CPU Memory [MiB]'}
 done
 MonetDB-BHT-8 : can be stopped
-Experiment : ends at 2024-10-08 02:54:05.075584 (3084785.199564025) - 4644.81s total
+Experiment : ends at 2024-12-20 15:11:45.007672 (4388706.414805365) - 10471.60s total
 Join results done!
-Build evaluation cube
\ No newline at end of file
+done!
+Experiment : downloading partial results
+Experiment : uploading full results
+Experiment : uploading workload file
+
+## Show Summary
+
+### Workload
+TPC-H Queries SF=100
+ Type: tpch
+ Duration: 10472s
+ Code: 1734693433
+ This includes the reading queries of TPC-H.
+ This experiment compares run time and resource consumption of TPC-H queries in different DBMS.
+ TPC-H (SF=100) data is loaded and benchmark is executed.
+ Query ordering is Q1 - Q22.
+ All instances use the same query parameters.
+ Timeout per query is 1200.
+ Import sets indexes and constraints after loading and recomputes statistics.
+ System metrics are monitored by a cluster-wide installation.
+ Benchmark is limited to DBMS ['MonetDB'].
+ Import is handled by 8 processes (pods).
+ Loading is fixed to cl-worker19.
+ Benchmarking is fixed to cl-worker19.
+ SUT is fixed to cl-worker11.
+ Database is persisted to disk of type shared and size 300Gi.
+ Loading is tested with [8] threads, split into [8] pods.
+ Benchmarking is tested with [1] threads, split into [1] pods.
+ Benchmarking is run as [1, 1, 3] times the number of benchmarking pods.
+ Experiment is run once.
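The "example metric" entries in the log above show fully substituted promql queries. Below is a minimal sketch of how such a query string is plausibly produced from a template; the placeholder names are assumptions for illustration, and only the resulting string is taken from the log:

```python
# Hypothetical reconstruction of the promql template substitution seen in the
# "example metric" log lines above; placeholder names are assumed, not bexhoma's.
template = (
    '(sum(max(container_memory_working_set_bytes{{'
    'pod=~"(.*){configuration}-{code}(.*)", container="{container}"}}) '
    'by (pod)))/1024/1024'
)
query = template.format(configuration='monetdb-bht-8',
                        code='1734693433',
                        container='dbms')
print(query)
# -> (sum(max(container_memory_working_set_bytes{pod=~"(.*)monetdb-bht-8-1734693433(.*)", container="dbms"}) by (pod)))/1024/1024
```

Swapping the container value for 'datagenerator', 'sensor' or 'dbmsbenchmarker' yields the other queries logged above.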
+ +### Connections +MonetDB-BHT-8-1-1 uses docker image monetdb/monetdb:Aug2024 + RAM:541008576512 + CPU:AMD Opteron(tm) Processor 6378 + Cores:64 + host:5.15.0-126-generic + node:cl-worker11 + disk:250230356 + datadisk:218202266 + volume_size:300G + volume_used:209G + requests_cpu:4 + requests_memory:16Gi +MonetDB-BHT-8-2-1 uses docker image monetdb/monetdb:Aug2024 + RAM:541008576512 + CPU:AMD Opteron(tm) Processor 6378 + Cores:64 + host:5.15.0-126-generic + node:cl-worker11 + disk:250230364 + datadisk:223241760 + volume_size:300G + volume_used:213G + requests_cpu:4 + requests_memory:16Gi +MonetDB-BHT-8-3-1 uses docker image monetdb/monetdb:Aug2024 + RAM:541008576512 + CPU:AMD Opteron(tm) Processor 6378 + Cores:64 + host:5.15.0-126-generic + node:cl-worker11 + disk:250062784 + datadisk:223241831 + volume_size:300G + volume_used:213G + requests_cpu:4 + requests_memory:16Gi +MonetDB-BHT-8-3-2 uses docker image monetdb/monetdb:Aug2024 + RAM:541008576512 + CPU:AMD Opteron(tm) Processor 6378 + Cores:64 + host:5.15.0-126-generic + node:cl-worker11 + disk:250062784 + datadisk:223241831 + volume_size:300G + volume_used:213G + requests_cpu:4 + requests_memory:16Gi +MonetDB-BHT-8-3-3 uses docker image monetdb/monetdb:Aug2024 + RAM:541008576512 + CPU:AMD Opteron(tm) Processor 6378 + Cores:64 + host:5.15.0-126-generic + node:cl-worker11 + disk:250062784 + datadisk:223241831 + volume_size:300G + volume_used:213G + requests_cpu:4 + requests_memory:16Gi + +### Errors (failed queries) + MonetDB-BHT-8-1-1 MonetDB-BHT-8-2-1 MonetDB-BHT-8-3-1 MonetDB-BHT-8-3-2 MonetDB-BHT-8-3-3 +Pricing Summary Report (TPC-H Q1) False False True True True +Discounted Revenue (TPC-H Q19) True True True True True +Pricing Summary Report (TPC-H Q1) +MonetDB-BHT-8-3-2: numRun 1: : java.sql.SQLException: GDK reported error: GDKextendf: could not extend file: Disk quota exceeded +MonetDB-BHT-8-3-3: numRun 1: : java.sql.SQLException: GDK reported error: GDKextendf: could not extend file: Disk quota exceeded +MonetDB-BHT-8-3-1: numRun 1: : java.sql.SQLException: GDK reported error: GDKextendf: could not extend file: Disk quota exceeded +Discounted Revenue (TPC-H Q19) +MonetDB-BHT-8-3-2: numRun 1: : java.sql.SQLException: GDK reported error: GDKextendf: could not extend file: Disk quota exceeded +MonetDB-BHT-8-3-3: numRun 1: : java.sql.SQLException: GDK reported error: GDKextendf: could not extend file: Disk quota exceeded +MonetDB-BHT-8-1-1: numRun 1: : java.sql.SQLException: GDK reported error: GDKextendf: could not extend file: Disk quota exceeded +MonetDB-BHT-8-2-1: numRun 1: : java.sql.SQLException: GDK reported error: GDKextendf: could not extend file: Disk quota exceeded +MonetDB-BHT-8-3-1: numRun 1: : java.sql.SQLException: GDK reported error: GDKextendf: could not extend file: Disk quota exceeded + +### Warnings (result mismatch) + MonetDB-BHT-8-1-1 MonetDB-BHT-8-2-1 MonetDB-BHT-8-3-1 MonetDB-BHT-8-3-2 MonetDB-BHT-8-3-3 +Pricing Summary Report (TPC-H Q1) True True False False False + +### Latency of Timer Execution [ms] +DBMS MonetDB-BHT-8-1-1 MonetDB-BHT-8-2-1 MonetDB-BHT-8-3-1 MonetDB-BHT-8-3-2 MonetDB-BHT-8-3-3 +Minimum Cost Supplier Query (TPC-H Q2) 3194.11 1670.45 5828.99 366.88 5829.51 +Shipping Priority (TPC-H Q3) 13761.14 13605.94 47417.47 30838.94 47307.44 +Order Priority Checking Query (TPC-H Q4) 12545.57 12306.40 30806.00 11911.52 32084.56 +Local Supplier Volume (TPC-H Q5) 11432.09 10183.79 13922.48 9918.79 12919.25 +Forecasting Revenue Change (TPC-H Q6) 6597.76 4943.78 6681.21 4111.89 6729.92 +Forecasting 
Revenue Change (TPC-H Q7) 8123.89 3621.12 10699.60 12694.40 10829.30 +National Market Share (TPC-H Q8) 60812.96 35310.95 33701.69 32777.41 34062.49 +Product Type Profit Measure (TPC-H Q9) 17328.66 17680.32 18508.85 19264.58 17902.92 +Forecasting Revenue Change (TPC-H Q10) 27227.63 26825.03 26808.97 27131.83 26433.34 +Important Stock Identification (TPC-H Q11) 1193.79 1322.06 1379.97 670.82 1770.63 +Shipping Modes and Order Priority (TPC-H Q12) 4938.74 4920.75 8202.82 6538.83 8342.99 +Customer Distribution (TPC-H Q13) 106149.62 108491.70 127149.26 111621.90 124587.94 +Forecasting Revenue Change (TPC-H Q14) 8514.29 6729.16 274.97 7634.95 328.72 +Top Supplier Query (TPC-H Q15) 9665.45 6289.91 5467.42 7270.07 5792.61 +Parts/Supplier Relationship (TPC-H Q16) 12033.89 12450.62 11841.51 14016.50 11828.14 +Small-Quantity-Order Revenue (TPC-H Q17) 18140.36 15204.61 15480.68 19180.17 17408.95 +Large Volume Customer (TPC-H Q18) 37795.66 19125.43 24020.87 24815.32 25415.36 +Potential Part Promotion (TPC-H Q20) 4293.23 4181.49 7021.23 6428.42 6791.12 +Suppliers Who Kept Orders Waiting Query (TPC-H Q21) 17644.97 16196.43 47957.20 42946.58 33884.11 +Global Sales Opportunity Query (TPC-H Q22) 6665.92 6907.86 6349.41 6282.68 7720.72 + +### Loading [s] + timeGenerate timeIngesting timeSchema timeIndex timeLoad +MonetDB-BHT-8-1-1 955.0 1602.0 6.0 5671.0 8250.0 +MonetDB-BHT-8-2-1 955.0 1602.0 6.0 5671.0 8250.0 +MonetDB-BHT-8-3-1 955.0 1602.0 6.0 5671.0 8250.0 +MonetDB-BHT-8-3-2 955.0 1602.0 6.0 5671.0 8250.0 +MonetDB-BHT-8-3-3 955.0 1602.0 6.0 5671.0 8250.0 + +### Geometric Mean of Medians of Timer Run [s] + Geo Times [s] +DBMS +MonetDB-BHT-8-1-1 12.49 +MonetDB-BHT-8-2-1 10.16 +MonetDB-BHT-8-3-1 12.53 +MonetDB-BHT-8-3-2 11.64 +MonetDB-BHT-8-3-3 12.69 + +### Power@Size + Power@Size [~Q/h] +DBMS +MonetDB-BHT-8-1-1 30931.10 +MonetDB-BHT-8-2-1 37542.45 +MonetDB-BHT-8-3-1 30428.55 +MonetDB-BHT-8-3-2 33693.59 +MonetDB-BHT-8-3-3 29802.28 + +### Throughput@Size + time [s] count SF Throughput@Size [~GB/h] +DBMS SF num_experiment num_client +MonetDB-BHT-8-1 100 1 1 628 1 100 12611.46 +MonetDB-BHT-8-2 100 1 2 567 1 100 13968.25 +MonetDB-BHT-8-3 100 1 3 605 3 100 39272.73 + +### Workflow + +#### Actual +DBMS MonetDB-BHT-8 - Pods [[1, 1, 3]] + +#### Planned +DBMS MonetDB-BHT-8 - Pods [[1, 1, 3]] + +### Ingestion - SUT + CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] +MonetDB-BHT-8-1 19913.85 12.72 107.69 205.07 +MonetDB-BHT-8-2 19913.85 12.72 107.69 205.07 +MonetDB-BHT-8-3 19913.85 12.72 107.69 205.07 + +### Ingestion - Loader + CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] +MonetDB-BHT-8-1 2348.27 1.26 0.02 0.02 +MonetDB-BHT-8-2 2348.27 1.26 0.02 0.02 +MonetDB-BHT-8-3 2348.27 1.26 0.02 0.02 + +### Execution - SUT + CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] +MonetDB-BHT-8-1 6101.85 23.08 132.79 260.12 +MonetDB-BHT-8-2 6120.54 11.71 151.97 276.09 +MonetDB-BHT-8-3 11331.73 21.06 183.26 313.77 + +### Execution - Benchmarker + CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] +MonetDB-BHT-8-1 26.05 0.01 0.29 0.30 +MonetDB-BHT-8-2 26.05 0.02 0.52 0.54 +MonetDB-BHT-8-3 59.23 0.08 0.98 0.99 + +### Tests +TEST passed: Geo Times [s] contains no 0 or NaN +TEST passed: Power@Size [~Q/h] contains no 0 or NaN +TEST passed: Throughput@Size [~GB/h] contains no 0 or NaN +TEST failed: SQL errors +TEST failed: SQL warnings (result mismatch) +TEST passed: Ingestion SUT contains no 0 or NaN in CPU [CPUs] +TEST passed: Ingestion Loader contains no 0 or NaN in CPU [CPUs] +TEST passed: Execution SUT contains no 0 or NaN 
in CPU [CPUs] +TEST passed: Execution Benchmarker contains no 0 or NaN in CPU [CPUs] +TEST passed: Workflow as planned diff --git a/logs_tests/doc_tpch_monetdb_3_summary.txt b/logs_tests/doc_tpch_monetdb_3_summary.txt index e69de29bb..7fba2b98c 100644 --- a/logs_tests/doc_tpch_monetdb_3_summary.txt +++ b/logs_tests/doc_tpch_monetdb_3_summary.txt @@ -0,0 +1,206 @@ +## Show Summary + +### Workload +TPC-H Queries SF=100 + Type: tpch + Duration: 10472s + Code: 1734693433 + This includes the reading queries of TPC-H. + This experiment compares run time and resource consumption of TPC-H queries in different DBMS. + TPC-H (SF=100) data is loaded and benchmark is executed. + Query ordering is Q1 - Q22. + All instances use the same query parameters. + Timeout per query is 1200. + Import sets indexes and constraints after loading and recomputes statistics. + System metrics are monitored by a cluster-wide installation. + Benchmark is limited to DBMS ['MonetDB']. + Import is handled by 8 processes (pods). + Loading is fixed to cl-worker19. + Benchmarking is fixed to cl-worker19. + SUT is fixed to cl-worker11. + Database is persisted to disk of type shared and size 300Gi. + Loading is tested with [8] threads, split into [8] pods. + Benchmarking is tested with [1] threads, split into [1] pods. + Benchmarking is run as [1, 1, 3] times the number of benchmarking pods. + Experiment is run once. + +### Connections +MonetDB-BHT-8-1-1 uses docker image monetdb/monetdb:Aug2024 + RAM:541008576512 + CPU:AMD Opteron(tm) Processor 6378 + Cores:64 + host:5.15.0-126-generic + node:cl-worker11 + disk:250230356 + datadisk:218202266 + volume_size:300G + volume_used:209G + requests_cpu:4 + requests_memory:16Gi +MonetDB-BHT-8-2-1 uses docker image monetdb/monetdb:Aug2024 + RAM:541008576512 + CPU:AMD Opteron(tm) Processor 6378 + Cores:64 + host:5.15.0-126-generic + node:cl-worker11 + disk:250230364 + datadisk:223241760 + volume_size:300G + volume_used:213G + requests_cpu:4 + requests_memory:16Gi +MonetDB-BHT-8-3-1 uses docker image monetdb/monetdb:Aug2024 + RAM:541008576512 + CPU:AMD Opteron(tm) Processor 6378 + Cores:64 + host:5.15.0-126-generic + node:cl-worker11 + disk:250062784 + datadisk:223241831 + volume_size:300G + volume_used:213G + requests_cpu:4 + requests_memory:16Gi +MonetDB-BHT-8-3-2 uses docker image monetdb/monetdb:Aug2024 + RAM:541008576512 + CPU:AMD Opteron(tm) Processor 6378 + Cores:64 + host:5.15.0-126-generic + node:cl-worker11 + disk:250062784 + datadisk:223241831 + volume_size:300G + volume_used:213G + requests_cpu:4 + requests_memory:16Gi +MonetDB-BHT-8-3-3 uses docker image monetdb/monetdb:Aug2024 + RAM:541008576512 + CPU:AMD Opteron(tm) Processor 6378 + Cores:64 + host:5.15.0-126-generic + node:cl-worker11 + disk:250062784 + datadisk:223241831 + volume_size:300G + volume_used:213G + requests_cpu:4 + requests_memory:16Gi + +### Errors (failed queries) + MonetDB-BHT-8-1-1 MonetDB-BHT-8-2-1 MonetDB-BHT-8-3-1 MonetDB-BHT-8-3-2 MonetDB-BHT-8-3-3 +Pricing Summary Report (TPC-H Q1) False False True True True +Discounted Revenue (TPC-H Q19) True True True True True +Pricing Summary Report (TPC-H Q1) +MonetDB-BHT-8-3-2: numRun 1: : java.sql.SQLException: GDK reported error: GDKextendf: could not extend file: Disk quota exceeded +MonetDB-BHT-8-3-3: numRun 1: : java.sql.SQLException: GDK reported error: GDKextendf: could not extend file: Disk quota exceeded +MonetDB-BHT-8-3-1: numRun 1: : java.sql.SQLException: GDK reported error: GDKextendf: could not extend file: Disk quota exceeded +Discounted Revenue 
(TPC-H Q19) +MonetDB-BHT-8-3-2: numRun 1: : java.sql.SQLException: GDK reported error: GDKextendf: could not extend file: Disk quota exceeded +MonetDB-BHT-8-3-3: numRun 1: : java.sql.SQLException: GDK reported error: GDKextendf: could not extend file: Disk quota exceeded +MonetDB-BHT-8-1-1: numRun 1: : java.sql.SQLException: GDK reported error: GDKextendf: could not extend file: Disk quota exceeded +MonetDB-BHT-8-2-1: numRun 1: : java.sql.SQLException: GDK reported error: GDKextendf: could not extend file: Disk quota exceeded +MonetDB-BHT-8-3-1: numRun 1: : java.sql.SQLException: GDK reported error: GDKextendf: could not extend file: Disk quota exceeded + +### Warnings (result mismatch) + MonetDB-BHT-8-1-1 MonetDB-BHT-8-2-1 MonetDB-BHT-8-3-1 MonetDB-BHT-8-3-2 MonetDB-BHT-8-3-3 +Pricing Summary Report (TPC-H Q1) True True False False False + +### Latency of Timer Execution [ms] +DBMS MonetDB-BHT-8-1-1 MonetDB-BHT-8-2-1 MonetDB-BHT-8-3-1 MonetDB-BHT-8-3-2 MonetDB-BHT-8-3-3 +Minimum Cost Supplier Query (TPC-H Q2) 3194.11 1670.45 5828.99 366.88 5829.51 +Shipping Priority (TPC-H Q3) 13761.14 13605.94 47417.47 30838.94 47307.44 +Order Priority Checking Query (TPC-H Q4) 12545.57 12306.40 30806.00 11911.52 32084.56 +Local Supplier Volume (TPC-H Q5) 11432.09 10183.79 13922.48 9918.79 12919.25 +Forecasting Revenue Change (TPC-H Q6) 6597.76 4943.78 6681.21 4111.89 6729.92 +Forecasting Revenue Change (TPC-H Q7) 8123.89 3621.12 10699.60 12694.40 10829.30 +National Market Share (TPC-H Q8) 60812.96 35310.95 33701.69 32777.41 34062.49 +Product Type Profit Measure (TPC-H Q9) 17328.66 17680.32 18508.85 19264.58 17902.92 +Forecasting Revenue Change (TPC-H Q10) 27227.63 26825.03 26808.97 27131.83 26433.34 +Important Stock Identification (TPC-H Q11) 1193.79 1322.06 1379.97 670.82 1770.63 +Shipping Modes and Order Priority (TPC-H Q12) 4938.74 4920.75 8202.82 6538.83 8342.99 +Customer Distribution (TPC-H Q13) 106149.62 108491.70 127149.26 111621.90 124587.94 +Forecasting Revenue Change (TPC-H Q14) 8514.29 6729.16 274.97 7634.95 328.72 +Top Supplier Query (TPC-H Q15) 9665.45 6289.91 5467.42 7270.07 5792.61 +Parts/Supplier Relationship (TPC-H Q16) 12033.89 12450.62 11841.51 14016.50 11828.14 +Small-Quantity-Order Revenue (TPC-H Q17) 18140.36 15204.61 15480.68 19180.17 17408.95 +Large Volume Customer (TPC-H Q18) 37795.66 19125.43 24020.87 24815.32 25415.36 +Potential Part Promotion (TPC-H Q20) 4293.23 4181.49 7021.23 6428.42 6791.12 +Suppliers Who Kept Orders Waiting Query (TPC-H Q21) 17644.97 16196.43 47957.20 42946.58 33884.11 +Global Sales Opportunity Query (TPC-H Q22) 6665.92 6907.86 6349.41 6282.68 7720.72 + +### Loading [s] + timeGenerate timeIngesting timeSchema timeIndex timeLoad +MonetDB-BHT-8-1-1 955.0 1602.0 6.0 5671.0 8250.0 +MonetDB-BHT-8-2-1 955.0 1602.0 6.0 5671.0 8250.0 +MonetDB-BHT-8-3-1 955.0 1602.0 6.0 5671.0 8250.0 +MonetDB-BHT-8-3-2 955.0 1602.0 6.0 5671.0 8250.0 +MonetDB-BHT-8-3-3 955.0 1602.0 6.0 5671.0 8250.0 + +### Geometric Mean of Medians of Timer Run [s] + Geo Times [s] +DBMS +MonetDB-BHT-8-1-1 12.49 +MonetDB-BHT-8-2-1 10.16 +MonetDB-BHT-8-3-1 12.53 +MonetDB-BHT-8-3-2 11.64 +MonetDB-BHT-8-3-3 12.69 + +### Power@Size + Power@Size [~Q/h] +DBMS +MonetDB-BHT-8-1-1 30931.10 +MonetDB-BHT-8-2-1 37542.45 +MonetDB-BHT-8-3-1 30428.55 +MonetDB-BHT-8-3-2 33693.59 +MonetDB-BHT-8-3-3 29802.28 + +### Throughput@Size + time [s] count SF Throughput@Size [~GB/h] +DBMS SF num_experiment num_client +MonetDB-BHT-8-1 100 1 1 628 1 100 12611.46 +MonetDB-BHT-8-2 100 1 2 567 1 100 13968.25 +MonetDB-BHT-8-3 100 1 
3 605 3 100 39272.73 + +### Workflow + +#### Actual +DBMS MonetDB-BHT-8 - Pods [[1, 1, 3]] + +#### Planned +DBMS MonetDB-BHT-8 - Pods [[1, 1, 3]] + +### Ingestion - SUT + CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] +MonetDB-BHT-8-1 19913.85 12.72 107.69 205.07 +MonetDB-BHT-8-2 19913.85 12.72 107.69 205.07 +MonetDB-BHT-8-3 19913.85 12.72 107.69 205.07 + +### Ingestion - Loader + CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] +MonetDB-BHT-8-1 2348.27 1.26 0.02 0.02 +MonetDB-BHT-8-2 2348.27 1.26 0.02 0.02 +MonetDB-BHT-8-3 2348.27 1.26 0.02 0.02 + +### Execution - SUT + CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] +MonetDB-BHT-8-1 6101.85 23.08 132.79 260.12 +MonetDB-BHT-8-2 6120.54 11.71 151.97 276.09 +MonetDB-BHT-8-3 11331.73 21.06 183.26 313.77 + +### Execution - Benchmarker + CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] +MonetDB-BHT-8-1 26.05 0.01 0.29 0.30 +MonetDB-BHT-8-2 26.05 0.02 0.52 0.54 +MonetDB-BHT-8-3 59.23 0.08 0.98 0.99 + +### Tests +TEST passed: Geo Times [s] contains no 0 or NaN +TEST passed: Power@Size [~Q/h] contains no 0 or NaN +TEST passed: Throughput@Size [~GB/h] contains no 0 or NaN +TEST failed: SQL errors +TEST failed: SQL warnings (result mismatch) +TEST passed: Ingestion SUT contains no 0 or NaN in CPU [CPUs] +TEST passed: Ingestion Loader contains no 0 or NaN in CPU [CPUs] +TEST passed: Execution SUT contains no 0 or NaN in CPU [CPUs] +TEST passed: Execution Benchmarker contains no 0 or NaN in CPU [CPUs] +TEST passed: Workflow as planned diff --git a/logs_tests/doc_tpch_testcase_databaseservice_1.log b/logs_tests/doc_tpch_testcase_databaseservice_1.log new file mode 100644 index 000000000..b0ac3c9ea --- /dev/null +++ b/logs_tests/doc_tpch_testcase_databaseservice_1.log @@ -0,0 +1,214 @@ +Data Directory : is running +Result Directory : is running +Dashboard : is running +Message Queue : is running +Experiment : has code 1734666830 +Experiment : starts at 2024-12-20 04:53:50.730248 (4351632.137378045) +Experiment : This experiment compares run time and resource consumption of TPC-H queries in different DBMS. +TPC-H (SF=3) data is loaded and benchmark is executed. +Query ordering is Q1 - Q22. +All instances use the same query parameters. +Timeout per query is 1200. +Import sets indexes and constraints after loading and recomputes statistics. +System metrics are monitored by a cluster-wide installation. +Benchmark is limited to DBMS ['DatabaseService']. +Import is handled by 8 processes (pods). +Loading is fixed to cl-worker19. +Benchmarking is fixed to cl-worker19. +SUT is fixed to cl-worker11. +Loading is tested with [8] threads, split into [8] pods. +Benchmarking is tested with [1] threads, split into [1] pods. +Benchmarking is run as [1] times the number of benchmarking pods. +Experiment is run once. 
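A note on the Throughput@Size tables above (a reader's cross-check, not part of the tool output): the reported values are consistent with count * 22 * 3600 * SF / time, i.e. a TPC-H-style throughput over the 22-query stream. A sketch under that assumption, checked against two rows of the MonetDB table above:

```python
# Assumed formula for Throughput@Size [~GB/h]; inputs are taken from the table.
def throughput_at_size(count, sf, time_s, queries=22):
    # queries per stream * parallel streams * SF, scaled from seconds to hours
    return count * queries * 3600 * sf / time_s

print(round(throughput_at_size(1, 100, 628), 2))  # 12611.46 (MonetDB-BHT-8-1)
print(round(throughput_at_size(3, 100, 605), 2))  # 39272.73 (MonetDB-BHT-8-3)
```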
+Cluster monitoring : is running +done +DatabaseService-BHT-8 : will start now +- waiting 30s - : done +DatabaseService-BHT-8 : waits for health check to succeed +- waiting 30s - : done +DatabaseService-BHT-8 : waits for health check to succeed +- waiting 30s - : done +DatabaseService-BHT-8 : waits for health check to succeed +- waiting 30s - : done +DatabaseService-BHT-8 : is not loaded yet +DatabaseService-BHT-8 : will start loading but not before 2024-12-20 03:57:23 (that is in 60 secs) +- waiting 30s - : done +DatabaseService-BHT-8 : is not loaded yet +DatabaseService-BHT-8 : will start loading but not before 2024-12-20 03:57:23 +done +DatabaseService-BHT-8 : is not loaded yet +DatabaseService-BHT-8 : start asynch loading scripts of type loaded +DatabaseService-BHT-8 : is loading +- waiting 30s - : done +DatabaseService-BHT-8 : is loading +- waiting 30s - : done +DatabaseService-BHT-8 : is loading +- waiting 30s - : done +DatabaseService-BHT-8 : is loading +- waiting 30s - : done +DatabaseService-BHT-8 : is loading +done +DatabaseService-BHT-8 : showing loader times +DatabaseService-BHT-8 : generator timespan (first to last [s]) = 1 +DatabaseService-BHT-8 : loader timespan (first to last [s]) = 97 +DatabaseService-BHT-8 : total timespan (first to last [s]) = 105 +DatabaseService-BHT-8 : start asynch loading scripts of type indexed +DatabaseService-BHT-8 : is loading +- waiting 30s - : done +DatabaseService-BHT-8 : is loading +- waiting 30s - : done +DatabaseService-BHT-8 : is loading +- waiting 30s - : done +DatabaseService-BHT-8 : is loading +- waiting 30s - : done +DatabaseService-BHT-8 : is loading +- waiting 30s - : done +DatabaseService-BHT-8 : is loading +- waiting 30s - : done +DatabaseService-BHT-8 : is loading +- waiting 30s - : done +DatabaseService-BHT-8 : is loading +done +DatabaseService-BHT-8 : benchmarks done 0 of 1. 
This will be client 1 +DatabaseService-BHT-8-1 : start benchmarking +Worker pods found: [] +DatabaseService-BHT-8 : benchmarking results in folder /home/perdelt/benchmarks/1734666830 +- waiting 10s - : done +found +DatabaseService-BHT-8-1 : collecting metrics of data generator +DatabaseService-BHT-8-1 : example metric {'query': '(sum(max(container_memory_working_set_bytes{pod=~"(.*)databaseservice-bht-8-1734666830(.*)", container="datagenerator"}) by (pod)))/1024/1024', 'title': 'CPU Memory [MiB]'} +DatabaseService-BHT-8-1 : collecting metrics of data injector +DatabaseService-BHT-8-1 : example metric {'query': '(sum(max(container_memory_working_set_bytes{pod=~"(.*)databaseservice-bht-8-1734666830(.*)", container="sensor"}) by (pod)))/1024/1024', 'title': 'CPU Memory [MiB]'} +- waiting 30s - : done +DatabaseService-BHT-8 : has running benchmarks +- waiting 30s - : done +DatabaseService-BHT-8 : has running benchmarks +done +DatabaseService-BHT-8 : has running benchmarks +DatabaseService-BHT-8-1 : showing benchmarker times +DatabaseService-BHT-8-1 : benchmarker timespan (start to end single container [s]) = 111 +DatabaseService-BHT-8-1 : benchmarker times (start/end per pod and container) = [(1734667451, 1734667528)] +DatabaseService-BHT-8-1 : found and updated times {'benchmarker': [(1734667451, 1734667528)]} +DatabaseService-BHT-8-1 : collecting metrics of benchmarker +DatabaseService-BHT-8-1 : example metric {'query': '(sum(max(container_memory_working_set_bytes{pod=~"(.*)databaseservice-bht-8-1734666830(.*)", container="dbmsbenchmarker"}) by (pod)))/1024/1024', 'title': 'CPU Memory [MiB]'} +done +DatabaseService-BHT-8 : can be stopped +Experiment : ends at 2024-12-20 05:06:35.119825 (4352396.526959069) - 764.39s total +Join results done! +done! +Experiment : downloading partial results +Experiment : uploading full results +Experiment : uploading workload file + +## Show Summary + +### Workload +TPC-H Queries SF=3 + Type: tpch + Duration: 765s + Code: 1734666830 + This includes the reading queries of TPC-H. + This experiment compares run time and resource consumption of TPC-H queries in different DBMS. + TPC-H (SF=3) data is loaded and benchmark is executed. + Query ordering is Q1 - Q22. + All instances use the same query parameters. + Timeout per query is 1200. + Import sets indexes and constraints after loading and recomputes statistics. + System metrics are monitored by a cluster-wide installation. + Benchmark is limited to DBMS ['DatabaseService']. + Import is handled by 8 processes (pods). + Loading is fixed to cl-worker19. + Benchmarking is fixed to cl-worker19. + SUT is fixed to cl-worker11. + Loading is tested with [8] threads, split into [8] pods. + Benchmarking is tested with [1] threads, split into [1] pods. + Benchmarking is run as [1] times the number of benchmarking pods. + Experiment is run once. 
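Note on the "example metric" entries in the log above: each promql query arrives fully substituted, with the configuration name and experiment code spliced into the pod-name regex. A minimal sketch of that substitution, assuming a format-style template with doubled braces for literal promql braces (the names here are illustrative, not the actual bexhoma API):

    # Minimal sketch, assuming a format-style promql template; doubled braces
    # are literal promql braces, single-brace fields get substituted.
    template = ('(sum(max(container_memory_working_set_bytes{{'
                'pod=~"(.*){configuration}-{experiment}(.*)", '
                'container="dbmsbenchmarker"}}) by (pod)))/1024/1024')
    query = template.format(configuration='databaseservice-bht-8',
                            experiment='1734666830')
    # query now equals the 'example metric' promql shown in the log above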
+ +### Connections +DatabaseService-BHT-8-1-1 uses docker image postgres:16.1 + RAM:541008576512 + CPU:AMD Opteron(tm) Processor 6378 + Cores:64 + host:5.15.0-126-generic + node:cl-worker11 + disk:249256232 + datadisk:39348 + requests_cpu:4 + requests_memory:16Gi + +### Errors (failed queries) +No errors + +### Warnings (result mismatch) +No warnings + +### Latency of Timer Execution [ms] +DBMS DatabaseService-BHT-8-1-1 +Pricing Summary Report (TPC-H Q1) 6184.88 +Minimum Cost Supplier Query (TPC-H Q2) 2121.70 +Shipping Priority (TPC-H Q3) 2477.89 +Order Priority Checking Query (TPC-H Q4) 3120.41 +Local Supplier Volume (TPC-H Q5) 2263.59 +Forecasting Revenue Change (TPC-H Q6) 1158.38 +Forecasting Revenue Change (TPC-H Q7) 2326.20 +National Market Share (TPC-H Q8) 1410.37 +Product Type Profit Measure (TPC-H Q9) 3187.17 +Forecasting Revenue Change (TPC-H Q10) 3063.93 +Important Stock Identification (TPC-H Q11) 561.36 +Shipping Modes and Order Priority (TPC-H Q12) 2457.12 +Customer Distribution (TPC-H Q13) 6562.69 +Forecasting Revenue Change (TPC-H Q14) 1280.33 +Top Supplier Query (TPC-H Q15) 1396.18 +Parts/Supplier Relationship (TPC-H Q16) 1346.35 +Small-Quantity-Order Revenue (TPC-H Q17) 5626.89 +Large Volume Customer (TPC-H Q18) 19220.56 +Discounted Revenue (TPC-H Q19) 1909.47 +Potential Part Promotion (TPC-H Q20) 1216.54 +Suppliers Who Kept Orders Waiting Query (TPC-H Q21) 2784.81 +Global Sales Opportunity Query (TPC-H Q22) 465.76 + +### Loading [s] + timeGenerate timeIngesting timeSchema timeIndex timeLoad +DatabaseService-BHT-8-1-1 1.0 97.0 1.0 216.0 322.0 + +### Geometric Mean of Medians of Timer Run [s] + Geo Times [s] +DBMS +DatabaseService-BHT-8-1-1 2.32 + +### Power@Size + Power@Size [~Q/h] +DBMS +DatabaseService-BHT-8-1-1 4783.05 + +### Throughput@Size + time [s] count SF Throughput@Size [~GB/h] +DBMS SF num_experiment num_client +DatabaseService-BHT-8-1 3 1 1 77 1 3 3085.71 + +### Workflow + +#### Actual +DBMS DatabaseService-BHT-8 - Pods [[1]] + +#### Planned +DBMS DatabaseService-BHT-8 - Pods [[1]] + +### Ingestion - Loader + CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] +DatabaseService-BHT-8-1 30.9 0.21 0.03 2.27 + +### Execution - Benchmarker + CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] +DatabaseService-BHT-8-1 11.41 0 0.23 0.24 + +### Tests +TEST passed: Geo Times [s] contains no 0 or NaN +TEST passed: Power@Size [~Q/h] contains no 0 or NaN +TEST passed: Throughput@Size [~GB/h] contains no 0 or NaN +TEST passed: No SQL errors +TEST passed: No SQL warnings +TEST passed: Ingestion Loader contains no 0 or NaN in CPU [CPUs] +TEST passed: Execution Benchmarker contains no 0 or NaN in CPU [CPUs] +TEST passed: Workflow as planned diff --git a/logs_tests/doc_tpch_testcase_databaseservice_1_summary.txt b/logs_tests/doc_tpch_testcase_databaseservice_1_summary.txt new file mode 100644 index 000000000..085d01b3c --- /dev/null +++ b/logs_tests/doc_tpch_testcase_databaseservice_1_summary.txt @@ -0,0 +1,112 @@ +## Show Summary + +### Workload +TPC-H Queries SF=3 + Type: tpch + Duration: 765s + Code: 1734666830 + This includes the reading queries of TPC-H. + This experiment compares run time and resource consumption of TPC-H queries in different DBMS. + TPC-H (SF=3) data is loaded and benchmark is executed. + Query ordering is Q1 - Q22. + All instances use the same query parameters. + Timeout per query is 1200. + Import sets indexes and constraints after loading and recomputes statistics. + System metrics are monitored by a cluster-wide installation. 
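A consistency check on the Power@Size figure reported above: it agrees with 3600 * SF divided by the geometric mean of the per-query execution latencies. This is a hedged reading, inferred from the reported numbers rather than from the tool's source:

    # Hedged check: Power@Size [~Q/h] ~= 3600 * SF / geomean(latency [s]),
    # using the 22 'Latency of Timer Execution' values from the summary above.
    from math import prod
    lat_ms = [6184.88, 2121.70, 2477.89, 3120.41, 2263.59, 1158.38, 2326.20,
              1410.37, 3187.17, 3063.93, 561.36, 2457.12, 6562.69, 1280.33,
              1396.18, 1346.35, 5626.89, 19220.56, 1909.47, 1216.54,
              2784.81, 465.76]
    geomean_s = prod(l / 1000.0 for l in lat_ms) ** (1.0 / len(lat_ms))
    print(round(3600 * 3 / geomean_s, 2))  # ~4783, matching Power@Size above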
+ Benchmark is limited to DBMS ['DatabaseService']. + Import is handled by 8 processes (pods). + Loading is fixed to cl-worker19. + Benchmarking is fixed to cl-worker19. + SUT is fixed to cl-worker11. + Loading is tested with [8] threads, split into [8] pods. + Benchmarking is tested with [1] threads, split into [1] pods. + Benchmarking is run as [1] times the number of benchmarking pods. + Experiment is run once. + +### Connections +DatabaseService-BHT-8-1-1 uses docker image postgres:16.1 + RAM:541008576512 + CPU:AMD Opteron(tm) Processor 6378 + Cores:64 + host:5.15.0-126-generic + node:cl-worker11 + disk:249256232 + datadisk:39348 + requests_cpu:4 + requests_memory:16Gi + +### Errors (failed queries) +No errors + +### Warnings (result mismatch) +No warnings + +### Latency of Timer Execution [ms] +DBMS DatabaseService-BHT-8-1-1 +Pricing Summary Report (TPC-H Q1) 6184.88 +Minimum Cost Supplier Query (TPC-H Q2) 2121.70 +Shipping Priority (TPC-H Q3) 2477.89 +Order Priority Checking Query (TPC-H Q4) 3120.41 +Local Supplier Volume (TPC-H Q5) 2263.59 +Forecasting Revenue Change (TPC-H Q6) 1158.38 +Forecasting Revenue Change (TPC-H Q7) 2326.20 +National Market Share (TPC-H Q8) 1410.37 +Product Type Profit Measure (TPC-H Q9) 3187.17 +Forecasting Revenue Change (TPC-H Q10) 3063.93 +Important Stock Identification (TPC-H Q11) 561.36 +Shipping Modes and Order Priority (TPC-H Q12) 2457.12 +Customer Distribution (TPC-H Q13) 6562.69 +Forecasting Revenue Change (TPC-H Q14) 1280.33 +Top Supplier Query (TPC-H Q15) 1396.18 +Parts/Supplier Relationship (TPC-H Q16) 1346.35 +Small-Quantity-Order Revenue (TPC-H Q17) 5626.89 +Large Volume Customer (TPC-H Q18) 19220.56 +Discounted Revenue (TPC-H Q19) 1909.47 +Potential Part Promotion (TPC-H Q20) 1216.54 +Suppliers Who Kept Orders Waiting Query (TPC-H Q21) 2784.81 +Global Sales Opportunity Query (TPC-H Q22) 465.76 + +### Loading [s] + timeGenerate timeIngesting timeSchema timeIndex timeLoad +DatabaseService-BHT-8-1-1 1.0 97.0 1.0 216.0 322.0 + +### Geometric Mean of Medians of Timer Run [s] + Geo Times [s] +DBMS +DatabaseService-BHT-8-1-1 2.32 + +### Power@Size + Power@Size [~Q/h] +DBMS +DatabaseService-BHT-8-1-1 4783.05 + +### Throughput@Size + time [s] count SF Throughput@Size [~GB/h] +DBMS SF num_experiment num_client +DatabaseService-BHT-8-1 3 1 1 77 1 3 3085.71 + +### Workflow + +#### Actual +DBMS DatabaseService-BHT-8 - Pods [[1]] + +#### Planned +DBMS DatabaseService-BHT-8 - Pods [[1]] + +### Ingestion - Loader + CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] +DatabaseService-BHT-8-1 30.9 0.21 0.03 2.27 + +### Execution - Benchmarker + CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] +DatabaseService-BHT-8-1 11.41 0 0.23 0.24 + +### Tests +TEST passed: Geo Times [s] contains no 0 or NaN +TEST passed: Power@Size [~Q/h] contains no 0 or NaN +TEST passed: Throughput@Size [~GB/h] contains no 0 or NaN +TEST passed: No SQL errors +TEST passed: No SQL warnings +TEST passed: Ingestion Loader contains no 0 or NaN in CPU [CPUs] +TEST passed: Execution Benchmarker contains no 0 or NaN in CPU [CPUs] +TEST passed: Workflow as planned diff --git a/logs_tests/doc_tpch_testcase_databaseservice_2.log b/logs_tests/doc_tpch_testcase_databaseservice_2.log new file mode 100644 index 000000000..937a12c5c --- /dev/null +++ b/logs_tests/doc_tpch_testcase_databaseservice_2.log @@ -0,0 +1,166 @@ +Data Directory : is running +Result Directory : is running +Dashboard : is running +Message Queue : is running +Experiment : has code 1734667671 +Experiment : starts at 2024-12-20 
05:07:50.927083 (4352472.334216012) +Experiment : This experiment compares run time and resource consumption of TPC-H queries in different DBMS. +TPC-H (SF=3) data is loaded and benchmark is executed. +Query ordering is Q1 - Q22. +All instances use the same query parameters. +Timeout per query is 1200. +Import sets indexes and constraints after loading and recomputes statistics. +System metrics are monitored by a cluster-wide installation. +Benchmark is limited to DBMS ['DatabaseService']. +Import is handled by 8 processes (pods). +Loading is fixed to cl-worker19. +Benchmarking is fixed to cl-worker19. +SUT is fixed to cl-worker11. +Loading is skipped. +Loading is tested with [8] threads, split into [8] pods. +Benchmarking is tested with [1] threads, split into [1] pods. +Benchmarking is run as [1] times the number of benchmarking pods. +Experiment is run once. +Cluster monitoring : is running +done +DatabaseService-BHT-8 : will start now +- waiting 30s - : done +DatabaseService-BHT-8 : will start benchmarking but not before 2024-12-20 04:09:51 (that is in 60 secs) +- waiting 30s - : done +DatabaseService-BHT-8 : will start benchmarking but not before 2024-12-20 04:09:51 +done +DatabaseService-BHT-8 : benchmarks done 0 of 1. This will be client 1 +DatabaseService-BHT-8-1 : start benchmarking +Worker pods found: [] +DatabaseService-BHT-8 : benchmarking results in folder /home/perdelt/benchmarks/1734667671 +- waiting 10s - : done +DatabaseService-BHT-8 : benchmarking is waiting for job bexhoma-benchmarker-databaseservice-bht-8-1734667671-1-1-q9wg9: found +- waiting 30s - : done +DatabaseService-BHT-8 : has running benchmarks +- waiting 30s - : done +DatabaseService-BHT-8 : has running benchmarks +done +DatabaseService-BHT-8 : has running benchmarks +DatabaseService-BHT-8-1 : showing benchmarker times +DatabaseService-BHT-8-1 : benchmarker timespan (start to end single container [s]) = 101 +DatabaseService-BHT-8-1 : benchmarker times (start/end per pod and container) = [(1734667800, 1734667876)] +DatabaseService-BHT-8-1 : found and updated times {'benchmarker': [(1734667800, 1734667876)]} +DatabaseService-BHT-8-1 : collecting metrics of benchmarker +DatabaseService-BHT-8-1 : example metric {'query': '(sum(max(container_memory_working_set_bytes{pod=~"(.*)databaseservice-bht-8-1734667671(.*)", container="dbmsbenchmarker"}) by (pod)))/1024/1024', 'title': 'CPU Memory [MiB]'} +done +DatabaseService-BHT-8 : can be stopped +Experiment : ends at 2024-12-20 05:12:12.416402 (4352733.823536302) - 261.49s total +Join results done! +done! +Experiment : downloading partial results +Experiment : uploading full results +Experiment : uploading workload file + +## Show Summary + +### Workload +TPC-H Queries SF=3 + Type: tpch + Duration: 262s + Code: 1734667671 + This includes the reading queries of TPC-H. + This experiment compares run time and resource consumption of TPC-H queries in different DBMS. + TPC-H (SF=3) data is loaded and benchmark is executed. + Query ordering is Q1 - Q22. + All instances use the same query parameters. + Timeout per query is 1200. + Import sets indexes and constraints after loading and recomputes statistics. + System metrics are monitored by a cluster-wide installation. + Benchmark is limited to DBMS ['DatabaseService']. + Import is handled by 8 processes (pods). + Loading is fixed to cl-worker19. + Benchmarking is fixed to cl-worker19. + SUT is fixed to cl-worker11. + Loading is skipped. + Loading is tested with [8] threads, split into [8] pods. 
+ Benchmarking is tested with [1] threads, split into [1] pods. + Benchmarking is run as [1] times the number of benchmarking pods. + Experiment is run once. + +### Connections +DatabaseService-BHT-8-1-1 uses docker image postgres:16.1 + RAM:541008576512 + CPU:AMD Opteron(tm) Processor 6378 + Cores:64 + host:5.15.0-126-generic + node:cl-worker11 + disk:249256216 + datadisk:39348 + requests_cpu:4 + requests_memory:16Gi + +### Errors (failed queries) +No errors + +### Warnings (result mismatch) +No warnings + +### Latency of Timer Execution [ms] +DBMS DatabaseService-BHT-8-1-1 +Pricing Summary Report (TPC-H Q1) 6149.40 +Minimum Cost Supplier Query (TPC-H Q2) 2110.09 +Shipping Priority (TPC-H Q3) 2435.87 +Order Priority Checking Query (TPC-H Q4) 3075.44 +Local Supplier Volume (TPC-H Q5) 2234.95 +Forecasting Revenue Change (TPC-H Q6) 1171.11 +Forecasting Revenue Change (TPC-H Q7) 2288.60 +National Market Share (TPC-H Q8) 1388.84 +Product Type Profit Measure (TPC-H Q9) 3168.23 +Forecasting Revenue Change (TPC-H Q10) 3075.63 +Important Stock Identification (TPC-H Q11) 563.29 +Shipping Modes and Order Priority (TPC-H Q12) 2453.85 +Customer Distribution (TPC-H Q13) 6242.59 +Forecasting Revenue Change (TPC-H Q14) 1271.74 +Top Supplier Query (TPC-H Q15) 1382.80 +Parts/Supplier Relationship (TPC-H Q16) 1349.50 +Small-Quantity-Order Revenue (TPC-H Q17) 5621.15 +Large Volume Customer (TPC-H Q18) 18750.06 +Discounted Revenue (TPC-H Q19) 1919.85 +Potential Part Promotion (TPC-H Q20) 1131.92 +Suppliers Who Kept Orders Waiting Query (TPC-H Q21) 2704.33 +Global Sales Opportunity Query (TPC-H Q22) 444.20 + +### Loading [s] + timeGenerate timeIngesting timeSchema timeIndex timeLoad +DatabaseService-BHT-8-1-1 0 0 0 0 0 + +### Geometric Mean of Medians of Timer Run [s] + Geo Times [s] +DBMS +DatabaseService-BHT-8-1-1 2.29 + +### Power@Size + Power@Size [~Q/h] +DBMS +DatabaseService-BHT-8-1-1 4850.83 + +### Throughput@Size + time [s] count SF Throughput@Size [~GB/h] +DBMS SF num_experiment num_client +DatabaseService-BHT-8-1 3 1 1 76 1 3 3126.32 + +### Workflow + +#### Actual +DBMS DatabaseService-BHT-8 - Pods [[1]] + +#### Planned +DBMS DatabaseService-BHT-8 - Pods [[1]] + +### Execution - Benchmarker + CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] +DatabaseService-BHT-8-1 11.36 0 0.22 0.24 + +### Tests +TEST passed: Geo Times [s] contains no 0 or NaN +TEST passed: Power@Size [~Q/h] contains no 0 or NaN +TEST passed: Throughput@Size [~GB/h] contains no 0 or NaN +TEST passed: No SQL errors +TEST passed: No SQL warnings +TEST passed: Execution Benchmarker contains no 0 or NaN in CPU [CPUs] +TEST passed: Workflow as planned diff --git a/logs_tests/doc_tpch_testcase_databaseservice_2_summary.txt b/logs_tests/doc_tpch_testcase_databaseservice_2_summary.txt new file mode 100644 index 000000000..e42c05b3e --- /dev/null +++ b/logs_tests/doc_tpch_testcase_databaseservice_2_summary.txt @@ -0,0 +1,108 @@ +## Show Summary + +### Workload +TPC-H Queries SF=3 + Type: tpch + Duration: 262s + Code: 1734667671 + This includes the reading queries of TPC-H. + This experiment compares run time and resource consumption of TPC-H queries in different DBMS. + TPC-H (SF=3) data is loaded and benchmark is executed. + Query ordering is Q1 - Q22. + All instances use the same query parameters. + Timeout per query is 1200. + Import sets indexes and constraints after loading and recomputes statistics. + System metrics are monitored by a cluster-wide installation. + Benchmark is limited to DBMS ['DatabaseService']. 
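The Throughput@Size values in these summaries are consistent with (22 queries * SF * 3600) / time[s]. A quick check against the two testcases so far, with the formula inferred from the reported numbers (not taken from the tool's source):

    # Hedged check: Throughput@Size [~GB/h] ~= 22 * SF * 3600 / time[s]
    for time_s, reported in [(77, 3085.71), (76, 3126.32)]:
        print(round(22 * 3 * 3600 / time_s, 2), reported)  # values agree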
+ Import is handled by 8 processes (pods). + Loading is fixed to cl-worker19. + Benchmarking is fixed to cl-worker19. + SUT is fixed to cl-worker11. + Loading is skipped. + Loading is tested with [8] threads, split into [8] pods. + Benchmarking is tested with [1] threads, split into [1] pods. + Benchmarking is run as [1] times the number of benchmarking pods. + Experiment is run once. + +### Connections +DatabaseService-BHT-8-1-1 uses docker image postgres:16.1 + RAM:541008576512 + CPU:AMD Opteron(tm) Processor 6378 + Cores:64 + host:5.15.0-126-generic + node:cl-worker11 + disk:249256216 + datadisk:39348 + requests_cpu:4 + requests_memory:16Gi + +### Errors (failed queries) +No errors + +### Warnings (result mismatch) +No warnings + +### Latency of Timer Execution [ms] +DBMS DatabaseService-BHT-8-1-1 +Pricing Summary Report (TPC-H Q1) 6149.40 +Minimum Cost Supplier Query (TPC-H Q2) 2110.09 +Shipping Priority (TPC-H Q3) 2435.87 +Order Priority Checking Query (TPC-H Q4) 3075.44 +Local Supplier Volume (TPC-H Q5) 2234.95 +Forecasting Revenue Change (TPC-H Q6) 1171.11 +Forecasting Revenue Change (TPC-H Q7) 2288.60 +National Market Share (TPC-H Q8) 1388.84 +Product Type Profit Measure (TPC-H Q9) 3168.23 +Forecasting Revenue Change (TPC-H Q10) 3075.63 +Important Stock Identification (TPC-H Q11) 563.29 +Shipping Modes and Order Priority (TPC-H Q12) 2453.85 +Customer Distribution (TPC-H Q13) 6242.59 +Forecasting Revenue Change (TPC-H Q14) 1271.74 +Top Supplier Query (TPC-H Q15) 1382.80 +Parts/Supplier Relationship (TPC-H Q16) 1349.50 +Small-Quantity-Order Revenue (TPC-H Q17) 5621.15 +Large Volume Customer (TPC-H Q18) 18750.06 +Discounted Revenue (TPC-H Q19) 1919.85 +Potential Part Promotion (TPC-H Q20) 1131.92 +Suppliers Who Kept Orders Waiting Query (TPC-H Q21) 2704.33 +Global Sales Opportunity Query (TPC-H Q22) 444.20 + +### Loading [s] + timeGenerate timeIngesting timeSchema timeIndex timeLoad +DatabaseService-BHT-8-1-1 0 0 0 0 0 + +### Geometric Mean of Medians of Timer Run [s] + Geo Times [s] +DBMS +DatabaseService-BHT-8-1-1 2.29 + +### Power@Size + Power@Size [~Q/h] +DBMS +DatabaseService-BHT-8-1-1 4850.83 + +### Throughput@Size + time [s] count SF Throughput@Size [~GB/h] +DBMS SF num_experiment num_client +DatabaseService-BHT-8-1 3 1 1 76 1 3 3126.32 + +### Workflow + +#### Actual +DBMS DatabaseService-BHT-8 - Pods [[1]] + +#### Planned +DBMS DatabaseService-BHT-8 - Pods [[1]] + +### Execution - Benchmarker + CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] +DatabaseService-BHT-8-1 11.36 0 0.22 0.24 + +### Tests +TEST passed: Geo Times [s] contains no 0 or NaN +TEST passed: Power@Size [~Q/h] contains no 0 or NaN +TEST passed: Throughput@Size [~GB/h] contains no 0 or NaN +TEST passed: No SQL errors +TEST passed: No SQL warnings +TEST passed: Execution Benchmarker contains no 0 or NaN in CPU [CPUs] +TEST passed: Workflow as planned diff --git a/logs_tests/doc_tpch_testcase_databaseservice_3.log b/logs_tests/doc_tpch_testcase_databaseservice_3.log new file mode 100644 index 000000000..b5d731714 --- /dev/null +++ b/logs_tests/doc_tpch_testcase_databaseservice_3.log @@ -0,0 +1,220 @@ +Data Directory : is running +Result Directory : is running +Dashboard : is running +Message Queue : is running +Experiment : has code 1734668021 +Experiment : starts at 2024-12-20 05:13:41.371484 (4352822.778614899) +Experiment : This experiment compares run time and resource consumption of TPC-H queries in different DBMS. +TPC-H (SF=3) data is loaded and benchmark is executed. +Query ordering is Q1 - Q22. 
+All instances use the same query parameters. +Timeout per query is 1200. +Import sets indexes and constraints after loading and recomputes statistics. +System metrics are monitored by a cluster-wide installation. +Benchmark is limited to DBMS ['DatabaseService']. +Import is handled by 8 processes (pods). +Loading is fixed to cl-worker19. +Benchmarking is fixed to cl-worker19. +SUT is fixed to cl-worker11. +Database is persisted to disk of type shared and size 1Gi. +Loading is tested with [8] threads, split into [8] pods. +Benchmarking is tested with [1] threads, split into [1] pods. +Benchmarking is run as [1] times the number of benchmarking pods. +Experiment is run once. +Cluster monitoring : is running +done +DatabaseService-BHT-8 : will start now +- waiting 30s - : done +DatabaseService-BHT-8 : waits for health check to succeed +- waiting 30s - : done +DatabaseService-BHT-8 : waits for health check to succeed +- waiting 30s - : done +DatabaseService-BHT-8 : waits for health check to succeed +- waiting 30s - : done +DatabaseService-BHT-8 : waits for health check to succeed +- waiting 30s - : done +DatabaseService-BHT-8 : is not loaded yet +DatabaseService-BHT-8 : will start loading but not before 2024-12-20 04:17:43 (that is in 60 secs) +- waiting 30s - : done +DatabaseService-BHT-8 : is not loaded yet +DatabaseService-BHT-8 : will start loading but not before 2024-12-20 04:17:43 +done +DatabaseService-BHT-8 : is not loaded yet +DatabaseService-BHT-8 : start asynch loading scripts of type loaded +DatabaseService-BHT-8 : is loading +- waiting 30s - : done +DatabaseService-BHT-8 : is loading +- waiting 30s - : done +DatabaseService-BHT-8 : is loading +- waiting 30s - : done +DatabaseService-BHT-8 : is loading +- waiting 30s - : done +DatabaseService-BHT-8 : is loading +done +DatabaseService-BHT-8 : is loading +done +DatabaseService-BHT-8 : showing loader times +DatabaseService-BHT-8 : generator timespan (first to last [s]) = 1 +DatabaseService-BHT-8 : loader timespan (first to last [s]) = 123 +DatabaseService-BHT-8 : total timespan (first to last [s]) = 129 +DatabaseService-BHT-8 : start asynch loading scripts of type indexed +DatabaseService-BHT-8 : is loading +- waiting 30s - : done +DatabaseService-BHT-8 : is loading +- waiting 30s - : done +DatabaseService-BHT-8 : is loading +- waiting 30s - : done +DatabaseService-BHT-8 : is loading +- waiting 30s - : done +DatabaseService-BHT-8 : is loading +- waiting 30s - : done +DatabaseService-BHT-8 : is loading +- waiting 30s - : done +DatabaseService-BHT-8 : is loading +done +DatabaseService-BHT-8 : benchmarks done 0 of 1. 
This will be client 1 +DatabaseService-BHT-8-1 : start benchmarking +Worker pods found: [] +DatabaseService-BHT-8 : benchmarking results in folder /home/perdelt/benchmarks/1734668021 +- waiting 10s - : done +found +DatabaseService-BHT-8-1 : collecting metrics of data generator +DatabaseService-BHT-8-1 : example metric {'query': '(sum(max(container_memory_working_set_bytes{pod=~"(.*)databaseservice-bht-8-1734668021(.*)", container="datagenerator"}) by (pod)))/1024/1024', 'title': 'CPU Memory [MiB]'} +DatabaseService-BHT-8-1 : collecting metrics of data injector +DatabaseService-BHT-8-1 : example metric {'query': '(sum(max(container_memory_working_set_bytes{pod=~"(.*)databaseservice-bht-8-1734668021(.*)", container="sensor"}) by (pod)))/1024/1024', 'title': 'CPU Memory [MiB]'} +- waiting 30s - : done +DatabaseService-BHT-8 : has running benchmarks +- waiting 30s - : done +DatabaseService-BHT-8 : has running benchmarks +done +DatabaseService-BHT-8 : has running benchmarks +DatabaseService-BHT-8-1 : showing benchmarker times +DatabaseService-BHT-8-1 : benchmarker timespan (start to end single container [s]) = 112 +DatabaseService-BHT-8-1 : benchmarker times (start/end per pod and container) = [(1734668681, 1734668751)] +DatabaseService-BHT-8-1 : found and updated times {'benchmarker': [(1734668681, 1734668751)]} +DatabaseService-BHT-8-1 : collecting metrics of benchmarker +DatabaseService-BHT-8-1 : example metric {'query': '(sum(max(container_memory_working_set_bytes{pod=~"(.*)databaseservice-bht-8-1734668021(.*)", container="dbmsbenchmarker"}) by (pod)))/1024/1024', 'title': 'CPU Memory [MiB]'} +done +DatabaseService-BHT-8 : can be stopped +Experiment : ends at 2024-12-20 05:26:59.025144 (4353620.432277678) - 797.65s total +Join results done! +done! +Experiment : downloading partial results +Experiment : uploading full results +Experiment : uploading workload file + +## Show Summary + +### Workload +TPC-H Queries SF=3 + Type: tpch + Duration: 798s + Code: 1734668021 + This includes the reading queries of TPC-H. + This experiment compares run time and resource consumption of TPC-H queries in different DBMS. + TPC-H (SF=3) data is loaded and benchmark is executed. + Query ordering is Q1 - Q22. + All instances use the same query parameters. + Timeout per query is 1200. + Import sets indexes and constraints after loading and recomputes statistics. + System metrics are monitored by a cluster-wide installation. + Benchmark is limited to DBMS ['DatabaseService']. + Import is handled by 8 processes (pods). + Loading is fixed to cl-worker19. + Benchmarking is fixed to cl-worker19. + SUT is fixed to cl-worker11. + Database is persisted to disk of type shared and size 1Gi. + Loading is tested with [8] threads, split into [8] pods. + Benchmarking is tested with [1] threads, split into [1] pods. + Benchmarking is run as [1] times the number of benchmarking pods. + Experiment is run once. 
+ +### Connections +DatabaseService-BHT-8-1-1 uses docker image postgres:16.1 + RAM:541008576512 + CPU:AMD Opteron(tm) Processor 6378 + Cores:64 + host:5.15.0-126-generic + node:cl-worker11 + disk:249216892 + datadisk:39192 + volume_size:1.0G + volume_used:36M + requests_cpu:4 + requests_memory:16Gi + +### Errors (failed queries) +No errors + +### Warnings (result mismatch) +No warnings + +### Latency of Timer Execution [ms] +DBMS DatabaseService-BHT-8-1-1 +Pricing Summary Report (TPC-H Q1) 5793.75 +Minimum Cost Supplier Query (TPC-H Q2) 1922.31 +Shipping Priority (TPC-H Q3) 2231.66 +Order Priority Checking Query (TPC-H Q4) 2835.42 +Local Supplier Volume (TPC-H Q5) 2072.36 +Forecasting Revenue Change (TPC-H Q6) 1066.68 +Forecasting Revenue Change (TPC-H Q7) 2155.03 +National Market Share (TPC-H Q8) 1301.26 +Product Type Profit Measure (TPC-H Q9) 2787.60 +Forecasting Revenue Change (TPC-H Q10) 2791.42 +Important Stock Identification (TPC-H Q11) 559.95 +Shipping Modes and Order Priority (TPC-H Q12) 2143.46 +Customer Distribution (TPC-H Q13) 5364.32 +Forecasting Revenue Change (TPC-H Q14) 1190.47 +Top Supplier Query (TPC-H Q15) 1291.81 +Parts/Supplier Relationship (TPC-H Q16) 1127.44 +Small-Quantity-Order Revenue (TPC-H Q17) 4912.21 +Large Volume Customer (TPC-H Q18) 16174.10 +Discounted Revenue (TPC-H Q19) 1735.84 +Potential Part Promotion (TPC-H Q20) 1088.33 +Suppliers Who Kept Orders Waiting Query (TPC-H Q21) 2474.25 +Global Sales Opportunity Query (TPC-H Q22) 488.84 + +### Loading [s] + timeGenerate timeIngesting timeSchema timeIndex timeLoad +DatabaseService-BHT-8-1-1 1.0 123.0 1.0 196.0 326.0 + +### Geometric Mean of Medians of Timer Run [s] + Geo Times [s] +DBMS +DatabaseService-BHT-8-1-1 2.11 + +### Power@Size + Power@Size [~Q/h] +DBMS +DatabaseService-BHT-8-1-1 5279.56 + +### Throughput@Size + time [s] count SF Throughput@Size [~GB/h] +DBMS SF num_experiment num_client +DatabaseService-BHT-8-1 3 1 1 70 1 3 3394.29 + +### Workflow + +#### Actual +DBMS DatabaseService-BHT-8 - Pods [[1]] + +#### Planned +DBMS DatabaseService-BHT-8 - Pods [[1]] + +### Ingestion - Loader + CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] +DatabaseService-BHT-8-1 41.77 0.04 0.02 2.8 + +### Execution - Benchmarker + CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] +DatabaseService-BHT-8-1 15.95 0 0.26 0.27 + +### Tests +TEST passed: Geo Times [s] contains no 0 or NaN +TEST passed: Power@Size [~Q/h] contains no 0 or NaN +TEST passed: Throughput@Size [~GB/h] contains no 0 or NaN +TEST passed: No SQL errors +TEST passed: No SQL warnings +TEST passed: Ingestion Loader contains no 0 or NaN in CPU [CPUs] +TEST passed: Execution Benchmarker contains no 0 or NaN in CPU [CPUs] +TEST passed: Workflow as planned diff --git a/logs_tests/doc_tpch_testcase_databaseservice_3_summary.txt b/logs_tests/doc_tpch_testcase_databaseservice_3_summary.txt new file mode 100644 index 000000000..8b15671e3 --- /dev/null +++ b/logs_tests/doc_tpch_testcase_databaseservice_3_summary.txt @@ -0,0 +1,115 @@ +## Show Summary + +### Workload +TPC-H Queries SF=3 + Type: tpch + Duration: 798s + Code: 1734668021 + This includes the reading queries of TPC-H. + This experiment compares run time and resource consumption of TPC-H queries in different DBMS. + TPC-H (SF=3) data is loaded and benchmark is executed. + Query ordering is Q1 - Q22. + All instances use the same query parameters. + Timeout per query is 1200. + Import sets indexes and constraints after loading and recomputes statistics. 
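A note on the Loading [s] row above: timeLoad slightly exceeds the sum of the listed phases, which suggests it also covers orchestration overhead between phases. This is an inference from the numbers, not a documented guarantee:

    # Hedged reading of the Loading [s] row for this testcase:
    phases = {'timeGenerate': 1.0, 'timeIngesting': 123.0,
              'timeSchema': 1.0, 'timeIndex': 196.0}
    print(sum(phases.values()))  # 321.0, vs. timeLoad = 326.0 (+5s overhead)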
+ System metrics are monitored by a cluster-wide installation. + Benchmark is limited to DBMS ['DatabaseService']. + Import is handled by 8 processes (pods). + Loading is fixed to cl-worker19. + Benchmarking is fixed to cl-worker19. + SUT is fixed to cl-worker11. + Database is persisted to disk of type shared and size 1Gi. + Loading is tested with [8] threads, split into [8] pods. + Benchmarking is tested with [1] threads, split into [1] pods. + Benchmarking is run as [1] times the number of benchmarking pods. + Experiment is run once. + +### Connections +DatabaseService-BHT-8-1-1 uses docker image postgres:16.1 + RAM:541008576512 + CPU:AMD Opteron(tm) Processor 6378 + Cores:64 + host:5.15.0-126-generic + node:cl-worker11 + disk:249216892 + datadisk:39192 + volume_size:1.0G + volume_used:36M + requests_cpu:4 + requests_memory:16Gi + +### Errors (failed queries) +No errors + +### Warnings (result mismatch) +No warnings + +### Latency of Timer Execution [ms] +DBMS DatabaseService-BHT-8-1-1 +Pricing Summary Report (TPC-H Q1) 5793.75 +Minimum Cost Supplier Query (TPC-H Q2) 1922.31 +Shipping Priority (TPC-H Q3) 2231.66 +Order Priority Checking Query (TPC-H Q4) 2835.42 +Local Supplier Volume (TPC-H Q5) 2072.36 +Forecasting Revenue Change (TPC-H Q6) 1066.68 +Forecasting Revenue Change (TPC-H Q7) 2155.03 +National Market Share (TPC-H Q8) 1301.26 +Product Type Profit Measure (TPC-H Q9) 2787.60 +Forecasting Revenue Change (TPC-H Q10) 2791.42 +Important Stock Identification (TPC-H Q11) 559.95 +Shipping Modes and Order Priority (TPC-H Q12) 2143.46 +Customer Distribution (TPC-H Q13) 5364.32 +Forecasting Revenue Change (TPC-H Q14) 1190.47 +Top Supplier Query (TPC-H Q15) 1291.81 +Parts/Supplier Relationship (TPC-H Q16) 1127.44 +Small-Quantity-Order Revenue (TPC-H Q17) 4912.21 +Large Volume Customer (TPC-H Q18) 16174.10 +Discounted Revenue (TPC-H Q19) 1735.84 +Potential Part Promotion (TPC-H Q20) 1088.33 +Suppliers Who Kept Orders Waiting Query (TPC-H Q21) 2474.25 +Global Sales Opportunity Query (TPC-H Q22) 488.84 + +### Loading [s] + timeGenerate timeIngesting timeSchema timeIndex timeLoad +DatabaseService-BHT-8-1-1 1.0 123.0 1.0 196.0 326.0 + +### Geometric Mean of Medians of Timer Run [s] + Geo Times [s] +DBMS +DatabaseService-BHT-8-1-1 2.11 + +### Power@Size + Power@Size [~Q/h] +DBMS +DatabaseService-BHT-8-1-1 5279.56 + +### Throughput@Size + time [s] count SF Throughput@Size [~GB/h] +DBMS SF num_experiment num_client +DatabaseService-BHT-8-1 3 1 1 70 1 3 3394.29 + +### Workflow + +#### Actual +DBMS DatabaseService-BHT-8 - Pods [[1]] + +#### Planned +DBMS DatabaseService-BHT-8 - Pods [[1]] + +### Ingestion - Loader + CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] +DatabaseService-BHT-8-1 41.77 0.04 0.02 2.8 + +### Execution - Benchmarker + CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] +DatabaseService-BHT-8-1 15.95 0 0.26 0.27 + +### Tests +TEST passed: Geo Times [s] contains no 0 or NaN +TEST passed: Power@Size [~Q/h] contains no 0 or NaN +TEST passed: Throughput@Size [~GB/h] contains no 0 or NaN +TEST passed: No SQL errors +TEST passed: No SQL warnings +TEST passed: Ingestion Loader contains no 0 or NaN in CPU [CPUs] +TEST passed: Execution Benchmarker contains no 0 or NaN in CPU [CPUs] +TEST passed: Workflow as planned diff --git a/logs_tests/doc_tpch_testcase_databaseservice_4.log b/logs_tests/doc_tpch_testcase_databaseservice_4.log new file mode 100644 index 000000000..df9c06c9c --- /dev/null +++ b/logs_tests/doc_tpch_testcase_databaseservice_4.log @@ -0,0 +1,170 @@ +Data Directory : 
is running +Result Directory : is running +Dashboard : is running +Message Queue : is running +Experiment : has code 1734668861 +Experiment : starts at 2024-12-20 05:27:41.707337 (4353663.114466471) +Experiment : This experiment compares run time and resource consumption of TPC-H queries in different DBMS. +TPC-H (SF=3) data is loaded and benchmark is executed. +Query ordering is Q1 - Q22. +All instances use the same query parameters. +Timeout per query is 1200. +Import sets indexes and constraints after loading and recomputes statistics. +System metrics are monitored by a cluster-wide installation. +Benchmark is limited to DBMS ['DatabaseService']. +Import is handled by 8 processes (pods). +Loading is fixed to cl-worker19. +Benchmarking is fixed to cl-worker19. +SUT is fixed to cl-worker11. +Database is persisted to disk of type shared and size 1Gi. +Loading is tested with [8] threads, split into [8] pods. +Benchmarking is tested with [1] threads, split into [1] pods. +Benchmarking is run as [1] times the number of benchmarking pods. +Experiment is run once. +Cluster monitoring : is running +done +DatabaseService-BHT-8 : will start now +DatabaseService-BHT-8 : storage exists bexhoma-storage-databaseservice-tpch-3 +DatabaseService-BHT-8 : loading is set to finished +- waiting 30s - : done +DatabaseService-BHT-8 : will start benchmarking but not before 2024-12-20 04:29:42 (that is in 60 secs) +- waiting 30s - : done +DatabaseService-BHT-8 : will start benchmarking but not before 2024-12-20 04:29:42 +done +DatabaseService-BHT-8 : benchmarks done 0 of 1. This will be client 1 +DatabaseService-BHT-8-1 : start benchmarking +Worker pods found: [] +DatabaseService-BHT-8 : benchmarking results in folder /home/perdelt/benchmarks/1734668861 +- waiting 10s - : done +DatabaseService-BHT-8 : benchmarking is waiting for job bexhoma-benchmarker-databaseservice-bht-8-1734668861-1-1-8l88l: .found +- waiting 30s - : done +DatabaseService-BHT-8 : has running benchmarks +- waiting 30s - : done +DatabaseService-BHT-8 : has running benchmarks +done +DatabaseService-BHT-8 : has running benchmarks +DatabaseService-BHT-8-1 : showing benchmarker times +DatabaseService-BHT-8-1 : benchmarker timespan (start to end single container [s]) = 112 +DatabaseService-BHT-8-1 : benchmarker times (start/end per pod and container) = [(1734668997, 1734669068)] +DatabaseService-BHT-8-1 : found and updated times {'benchmarker': [(1734668997, 1734669068)]} +DatabaseService-BHT-8-1 : collecting metrics of benchmarker +DatabaseService-BHT-8-1 : example metric {'query': '(sum(max(container_memory_working_set_bytes{pod=~"(.*)databaseservice-bht-8-1734668861(.*)", container="dbmsbenchmarker"}) by (pod)))/1024/1024', 'title': 'CPU Memory [MiB]'} +done +DatabaseService-BHT-8 : can be stopped +Experiment : ends at 2024-12-20 05:32:13.728204 (4353935.135337864) - 272.02s total +Join results done! +done! +Experiment : downloading partial results +Experiment : uploading full results +Experiment : uploading workload file + +## Show Summary + +### Workload +TPC-H Queries SF=3 + Type: tpch + Duration: 273s + Code: 1734668861 + This includes the reading queries of TPC-H. + This experiment compares run time and resource consumption of TPC-H queries in different DBMS. + TPC-H (SF=3) data is loaded and benchmark is executed. + Query ordering is Q1 - Q22. + All instances use the same query parameters. + Timeout per query is 1200. + Import sets indexes and constraints after loading and recomputes statistics. 
+ System metrics are monitored by a cluster-wide installation. + Benchmark is limited to DBMS ['DatabaseService']. + Import is handled by 8 processes (pods). + Loading is fixed to cl-worker19. + Benchmarking is fixed to cl-worker19. + SUT is fixed to cl-worker11. + Database is persisted to disk of type shared and size 1Gi. + Loading is tested with [8] threads, split into [8] pods. + Benchmarking is tested with [1] threads, split into [1] pods. + Benchmarking is run as [1] times the number of benchmarking pods. + Experiment is run once. + +### Connections +DatabaseService-BHT-8-1-1 uses docker image postgres:16.1 + RAM:541008576512 + CPU:AMD Opteron(tm) Processor 6378 + Cores:64 + host:5.15.0-126-generic + node:cl-worker11 + disk:249216876 + datadisk:39192 + volume_size:1.0G + volume_used:36M + requests_cpu:4 + requests_memory:16Gi + +### Errors (failed queries) +No errors + +### Warnings (result mismatch) +No warnings + +### Latency of Timer Execution [ms] +DBMS DatabaseService-BHT-8-1-1 +Pricing Summary Report (TPC-H Q1) 5785.36 +Minimum Cost Supplier Query (TPC-H Q2) 1938.19 +Shipping Priority (TPC-H Q3) 2255.59 +Order Priority Checking Query (TPC-H Q4) 2827.21 +Local Supplier Volume (TPC-H Q5) 2018.18 +Forecasting Revenue Change (TPC-H Q6) 1062.37 +Forecasting Revenue Change (TPC-H Q7) 2075.73 +National Market Share (TPC-H Q8) 1336.07 +Product Type Profit Measure (TPC-H Q9) 2808.84 +Forecasting Revenue Change (TPC-H Q10) 2804.71 +Important Stock Identification (TPC-H Q11) 583.68 +Shipping Modes and Order Priority (TPC-H Q12) 2137.04 +Customer Distribution (TPC-H Q13) 5569.93 +Forecasting Revenue Change (TPC-H Q14) 1130.83 +Top Supplier Query (TPC-H Q15) 1321.32 +Parts/Supplier Relationship (TPC-H Q16) 1239.32 +Small-Quantity-Order Revenue (TPC-H Q17) 5228.78 +Large Volume Customer (TPC-H Q18) 17602.90 +Discounted Revenue (TPC-H Q19) 1735.06 +Potential Part Promotion (TPC-H Q20) 1018.27 +Suppliers Who Kept Orders Waiting Query (TPC-H Q21) 2464.08 +Global Sales Opportunity Query (TPC-H Q22) 451.76 + +### Loading [s] + timeGenerate timeIngesting timeSchema timeIndex timeLoad +DatabaseService-BHT-8-1-1 1.0 123.0 1.0 196.0 326.0 + +### Geometric Mean of Medians of Timer Run [s] + Geo Times [s] +DBMS +DatabaseService-BHT-8-1-1 2.11 + +### Power@Size + Power@Size [~Q/h] +DBMS +DatabaseService-BHT-8-1-1 5249.82 + +### Throughput@Size + time [s] count SF Throughput@Size [~GB/h] +DBMS SF num_experiment num_client +DatabaseService-BHT-8-1 3 1 1 71 1 3 3346.48 + +### Workflow + +#### Actual +DBMS DatabaseService-BHT-8 - Pods [[1]] + +#### Planned +DBMS DatabaseService-BHT-8 - Pods [[1]] + +### Execution - Benchmarker + CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] +DatabaseService-BHT-8-1 16.0 0 0.25 0.27 + +### Tests +TEST passed: Geo Times [s] contains no 0 or NaN +TEST passed: Power@Size [~Q/h] contains no 0 or NaN +TEST passed: Throughput@Size [~GB/h] contains no 0 or NaN +TEST passed: No SQL errors +TEST passed: No SQL warnings +TEST passed: Execution Benchmarker contains no 0 or NaN in CPU [CPUs] +TEST passed: Workflow as planned diff --git a/logs_tests/doc_tpch_testcase_databaseservice_4_summary.txt b/logs_tests/doc_tpch_testcase_databaseservice_4_summary.txt new file mode 100644 index 000000000..322c742e9 --- /dev/null +++ b/logs_tests/doc_tpch_testcase_databaseservice_4_summary.txt @@ -0,0 +1,110 @@ +## Show Summary + +### Workload +TPC-H Queries SF=3 + Type: tpch + Duration: 273s + Code: 1734668861 + This includes the reading queries of TPC-H. 
+ This experiment compares run time and resource consumption of TPC-H queries in different DBMS. + TPC-H (SF=3) data is loaded and benchmark is executed. + Query ordering is Q1 - Q22. + All instances use the same query parameters. + Timeout per query is 1200. + Import sets indexes and constraints after loading and recomputes statistics. + System metrics are monitored by a cluster-wide installation. + Benchmark is limited to DBMS ['DatabaseService']. + Import is handled by 8 processes (pods). + Loading is fixed to cl-worker19. + Benchmarking is fixed to cl-worker19. + SUT is fixed to cl-worker11. + Database is persisted to disk of type shared and size 1Gi. + Loading is tested with [8] threads, split into [8] pods. + Benchmarking is tested with [1] threads, split into [1] pods. + Benchmarking is run as [1] times the number of benchmarking pods. + Experiment is run once. + +### Connections +DatabaseService-BHT-8-1-1 uses docker image postgres:16.1 + RAM:541008576512 + CPU:AMD Opteron(tm) Processor 6378 + Cores:64 + host:5.15.0-126-generic + node:cl-worker11 + disk:249216876 + datadisk:39192 + volume_size:1.0G + volume_used:36M + requests_cpu:4 + requests_memory:16Gi + +### Errors (failed queries) +No errors + +### Warnings (result mismatch) +No warnings + +### Latency of Timer Execution [ms] +DBMS DatabaseService-BHT-8-1-1 +Pricing Summary Report (TPC-H Q1) 5785.36 +Minimum Cost Supplier Query (TPC-H Q2) 1938.19 +Shipping Priority (TPC-H Q3) 2255.59 +Order Priority Checking Query (TPC-H Q4) 2827.21 +Local Supplier Volume (TPC-H Q5) 2018.18 +Forecasting Revenue Change (TPC-H Q6) 1062.37 +Forecasting Revenue Change (TPC-H Q7) 2075.73 +National Market Share (TPC-H Q8) 1336.07 +Product Type Profit Measure (TPC-H Q9) 2808.84 +Forecasting Revenue Change (TPC-H Q10) 2804.71 +Important Stock Identification (TPC-H Q11) 583.68 +Shipping Modes and Order Priority (TPC-H Q12) 2137.04 +Customer Distribution (TPC-H Q13) 5569.93 +Forecasting Revenue Change (TPC-H Q14) 1130.83 +Top Supplier Query (TPC-H Q15) 1321.32 +Parts/Supplier Relationship (TPC-H Q16) 1239.32 +Small-Quantity-Order Revenue (TPC-H Q17) 5228.78 +Large Volume Customer (TPC-H Q18) 17602.90 +Discounted Revenue (TPC-H Q19) 1735.06 +Potential Part Promotion (TPC-H Q20) 1018.27 +Suppliers Who Kept Orders Waiting Query (TPC-H Q21) 2464.08 +Global Sales Opportunity Query (TPC-H Q22) 451.76 + +### Loading [s] + timeGenerate timeIngesting timeSchema timeIndex timeLoad +DatabaseService-BHT-8-1-1 1.0 123.0 1.0 196.0 326.0 + +### Geometric Mean of Medians of Timer Run [s] + Geo Times [s] +DBMS +DatabaseService-BHT-8-1-1 2.11 + +### Power@Size + Power@Size [~Q/h] +DBMS +DatabaseService-BHT-8-1-1 5249.82 + +### Throughput@Size + time [s] count SF Throughput@Size [~GB/h] +DBMS SF num_experiment num_client +DatabaseService-BHT-8-1 3 1 1 71 1 3 3346.48 + +### Workflow + +#### Actual +DBMS DatabaseService-BHT-8 - Pods [[1]] + +#### Planned +DBMS DatabaseService-BHT-8 - Pods [[1]] + +### Execution - Benchmarker + CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] +DatabaseService-BHT-8-1 16.0 0 0.25 0.27 + +### Tests +TEST passed: Geo Times [s] contains no 0 or NaN +TEST passed: Power@Size [~Q/h] contains no 0 or NaN +TEST passed: Throughput@Size [~GB/h] contains no 0 or NaN +TEST passed: No SQL errors +TEST passed: No SQL warnings +TEST passed: Execution Benchmarker contains no 0 or NaN in CPU [CPUs] +TEST passed: Workflow as planned diff --git a/logs_tests/doc_ycsb_cockroachdb_1.log b/logs_tests/doc_ycsb_cockroachdb_1.log index 5259a6d7d..704d6146e 100644 
--- a/logs_tests/doc_ycsb_cockroachdb_1.log +++ b/logs_tests/doc_ycsb_cockroachdb_1.log @@ -2,10 +2,10 @@ Data Directory : is running Result Directory : is running Dashboard : is running Message Queue : is running -Experiment : has code 1730301195 -Experiment : starts at 2024-10-30 16:13:14.978661 (5037135.102640003) +Experiment : has code 1734645173 +Experiment : starts at 2024-12-19 22:52:53.295133 (4329974.702264299) Experiment : This experiment compares run time and resource consumption of YCSB queries. -Workload is 'A'. Number of rows to insert is 1000000. Number of operations is 1000000. Batch size is ''. +Workload is 'A'. Number of rows to insert is 1000000. Number of operations is 10000000. Batch size is ''. YCSB is performed using several threads and processes. Target is based on multiples of '16384'. Factors for loading are [4]. Factors for benchmarking are [4]. System metrics are monitored by a cluster-wide installation. Benchmark is limited to DBMS ['CockroachDB']. @@ -13,105 +13,58 @@ Import is handled by 8 processes (pods). Loading is fixed to cl-worker19. Benchmarking is fixed to cl-worker19. SUT is fixed to cl-worker11. -Database is persisted to disk of type shared and size 30Gi. Loading is tested with [64] threads, split into [8] pods. Benchmarking is tested with [64] threads, split into [1] pods. Benchmarking is run as [1] times the number of benchmarking pods. Experiment is run once. Cluster monitoring : is running -Warning: spec.template.spec.containers[1].env[4]: hides previous definition of "BEXHOMA_WORKER_LIST" -Warning: spec.template.spec.containers[0].env[2]: hides previous definition of "BEXHOMA_WORKER_FIRST" +Warning: spec.template.spec.containers[1].env[4]: hides previous definition of "BEXHOMA_WORKER_LIST", which may be dropped when using apply +Warning: spec.template.spec.containers[0].env[2]: hides previous definition of "BEXHOMA_WORKER_FIRST", which may be dropped when using apply done CockroachDB-64-8-65536 : will start now -[{'metadata': {'name': 'bexhoma-workers', 'labels': {'app': 'bexhoma', 'component': 'worker', 'configuration': 'default', 'experiment': 'default'}}, 'spec': {'accessModes': ['ReadWriteOnce'], 'resources': {'requests': {'storage': '100Gi'}}, 'storageClassName': 'shared'}}] - waiting 30s - : done CockroachDB-64-8-65536 : is not loaded yet -CockroachDB-64-8-65536 : will start loading but not before 2024-10-30 15:16:15 (that is in 120 secs) +CockroachDB-64-8-65536 : will start loading but not before 2024-12-19 21:55:54 (that is in 120 secs) - waiting 30s - : done CockroachDB-64-8-65536 : is not loaded yet -CockroachDB-64-8-65536 : will start loading but not before 2024-10-30 15:16:15 +CockroachDB-64-8-65536 : will start loading but not before 2024-12-19 21:55:54 - waiting 30s - : done CockroachDB-64-8-65536 : is not loaded yet -CockroachDB-64-8-65536 : will start loading but not before 2024-10-30 15:16:15 +CockroachDB-64-8-65536 : will start loading but not before 2024-12-19 21:55:54 - waiting 30s - : done CockroachDB-64-8-65536 : is not loaded yet -CockroachDB-64-8-65536 : will start loading but not before 2024-10-30 15:16:15 +CockroachDB-64-8-65536 : will start loading but not before 2024-12-19 21:55:54 - waiting 30s - : done CockroachDB-64-8-65536 : is not loaded yet done +Worker pods found: ['bexhoma-worker-cockroachdb-64-8-65536-1734645173-0', 'bexhoma-worker-cockroachdb-64-8-65536-1734645173-1', 'bexhoma-worker-cockroachdb-64-8-65536-1734645173-2'] CockroachDB-64-8-65536 Workers 3 of 3 +Worker pods found: 
['bexhoma-worker-cockroachdb-64-8-65536-1734645173-0', 'bexhoma-worker-cockroachdb-64-8-65536-1734645173-1', 'bexhoma-worker-cockroachdb-64-8-65536-1734645173-2'] CockroachDB-64-8-65536 : start asynch loading scripts of type loaded CockroachDB-64-8-65536 : is loading - waiting 30s - : done CockroachDB-64-8-65536 : is loading - waiting 30s - : done CockroachDB-64-8-65536 : is loading -- waiting 30s - : done -CockroachDB-64-8-65536 : is loading -- waiting 30s - : done -CockroachDB-64-8-65536 : is loading -- waiting 30s - : done -CockroachDB-64-8-65536 : is loading -- waiting 30s - : done -CockroachDB-64-8-65536 : is loading -- waiting 30s - : done -CockroachDB-64-8-65536 : is loading -- waiting 30s - : done -CockroachDB-64-8-65536 : is loading -- waiting 30s - : done -CockroachDB-64-8-65536 : is loading -- waiting 30s - : done -CockroachDB-64-8-65536 : is loading -- waiting 30s - : done -CockroachDB-64-8-65536 : is loading -- waiting 30s - : done -CockroachDB-64-8-65536 : is loading -- waiting 30s - : done -CockroachDB-64-8-65536 : is loading -- waiting 30s - : done -CockroachDB-64-8-65536 : is loading -- waiting 30s - : done -CockroachDB-64-8-65536 : is loading -- waiting 30s - : done -CockroachDB-64-8-65536 : is loading -- waiting 30s - : done -CockroachDB-64-8-65536 : is loading -- waiting 30s - : done -CockroachDB-64-8-65536 : is loading -- waiting 30s - : done -CockroachDB-64-8-65536 : is loading -- waiting 30s - : done -CockroachDB-64-8-65536 : is loading -- waiting 30s - : done -CockroachDB-64-8-65536 : is loading -- waiting 30s - : done -CockroachDB-64-8-65536 : is loading -- waiting 30s - : done -CockroachDB-64-8-65536 : is loading -- waiting 30s - : done -CockroachDB-64-8-65536 : is loading -- waiting 30s - : done -CockroachDB-64-8-65536 : is loading -- waiting 30s - : done -CockroachDB-64-8-65536 : is loading -- waiting 30s - : done -CockroachDB-64-8-65536 : is loading -done -CockroachDB-64-8-65536 : is loading -Error from server (NotFound): persistentvolumeclaims "bexhoma-storage-cockroachdb-64-8-65536-ycsb-1" not found -Error from server (NotFound): persistentvolumeclaims "bexhoma-storage-cockroachdb-64-8-65536-ycsb-1" not found done CockroachDB-64-8-65536 : showing loader times -CockroachDB-64-8-65536 : loader timespan (first to last [s]) = 847 +CockroachDB-64-8-65536 : loader timespan (first to last [s]) = 63 CockroachDB-64-8-65536 : benchmarks done 0 of 1. 
This will be client 1 -CockroachDB-64-8-65536 : we will change parameters of benchmark as {'PARALLEL': '1', 'SF': '1', 'BEXHOMA_SYNCH_LOAD': 1, 'YCSB_THREADCOUNT': 64, 'YCSB_TARGET': 65536, 'YCSB_STATUS': 1, 'YCSB_WORKLOAD': 'a', 'YCSB_ROWS': 1000000, 'YCSB_OPERATIONS': 1000000, 'YCSB_BATCHSIZE': ''} +CockroachDB-64-8-65536 : we will change parameters of benchmark as {'PARALLEL': '1', 'SF': '1', 'BEXHOMA_SYNCH_LOAD': 1, 'YCSB_THREADCOUNT': 64, 'YCSB_TARGET': 65536, 'YCSB_STATUS': 1, 'YCSB_WORKLOAD': 'a', 'YCSB_ROWS': 1000000, 'YCSB_OPERATIONS': 10000000, 'YCSB_BATCHSIZE': ''} CockroachDB-64-8-65536-1 : start benchmarking -CockroachDB-64-8-65536 : benchmarking results in folder /home/perdelt/benchmarks/1730301195 +Worker pods found: ['bexhoma-worker-cockroachdb-64-8-65536-1734645173-0', 'bexhoma-worker-cockroachdb-64-8-65536-1734645173-1', 'bexhoma-worker-cockroachdb-64-8-65536-1734645173-2'] +CockroachDB-64-8-65536 : distributed system - get host info for worker bexhoma-worker-cockroachdb-64-8-65536-1734645173-0 +CockroachDB-64-8-65536 : distributed system - get host info for worker bexhoma-worker-cockroachdb-64-8-65536-1734645173-1 +CockroachDB-64-8-65536 : distributed system - get host info for worker bexhoma-worker-cockroachdb-64-8-65536-1734645173-2 +CockroachDB-64-8-65536 : benchmarking results in folder /home/perdelt/benchmarks/1734645173 - waiting 10s - : done found CockroachDB-64-8-65536-1 : collecting loading metrics of SUT +CockroachDB-64-8-65536-1 : example metric {'query': '(sum(max(container_memory_working_set_bytes{pod=~"(.*)cockroachdb-64-8-65536-1734645173(.*)", container="dbms"}) by (pod)))/1024/1024', 'title': 'CPU Memory [MiB]'} CockroachDB-64-8-65536-1 : collecting metrics of data generator +CockroachDB-64-8-65536-1 : example metric {'query': '(sum(max(container_memory_working_set_bytes{pod=~"(.*)cockroachdb-64-8-65536-1734645173(.*)", container="datagenerator"}) by (pod)))/1024/1024', 'title': 'CPU Memory [MiB]'} CockroachDB-64-8-65536-1 : collecting metrics of data injector +CockroachDB-64-8-65536-1 : example metric {'query': '(sum(max(container_memory_working_set_bytes{pod=~"(.*)cockroachdb-64-8-65536-1734645173(.*)", container="sensor"}) by (pod)))/1024/1024', 'title': 'CPU Memory [MiB]'} - waiting 30s - : done CockroachDB-64-8-65536 : has running benchmarks - waiting 30s - : done @@ -156,43 +109,22 @@ CockroachDB-64-8-65536 : has running benchmarks CockroachDB-64-8-65536 : has running benchmarks - waiting 30s - : done CockroachDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -CockroachDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -CockroachDB-64-8-65536 : has running benchmarks -Error from server (NotFound): persistentvolumeclaims "bexhoma-storage-cockroachdb-64-8-65536-ycsb-1" not found done CockroachDB-64-8-65536 : has running benchmarks CockroachDB-64-8-65536-1 : showing benchmarker times -CockroachDB-64-8-65536-1 : benchmarker timespan (start to end single container [s]) = 781 -CockroachDB-64-8-65536-1 : benchmarker times (start/end per pod and container) = [(1730302281, 1730303029)] -CockroachDB-64-8-65536-1 : found and updated times {'benchmarker': [(1730302281, 1730303029)]} +CockroachDB-64-8-65536-1 : benchmarker timespan (start to end single container [s]) = 720 +CockroachDB-64-8-65536-1 : benchmarker times (start/end per pod and container) = [(1734645467, 1734646176)] +CockroachDB-64-8-65536-1 : found and updated times {'benchmarker': [(1734645467, 1734646176)]} CockroachDB-64-8-65536-1 : collecting execution metrics of SUT 
+CockroachDB-64-8-65536-1 : example metric {'query': '(sum(max(container_memory_working_set_bytes{pod=~"(.*)cockroachdb-64-8-65536-1734645173(.*)", container="dbms"}) by (pod)))/1024/1024', 'title': 'CPU Memory [MiB]'} CockroachDB-64-8-65536-1 : collecting metrics of benchmarker +CockroachDB-64-8-65536-1 : example metric {'query': '(sum(max(container_memory_working_set_bytes{pod=~"(.*)cockroachdb-64-8-65536-1734645173(.*)", container="dbmsbenchmarker"}) by (pod)))/1024/1024', 'title': 'CPU Memory [MiB]'} done CockroachDB-64-8-65536 : can be stopped -Experiment : ends at 2024-10-30 16:45:06.419442 (5039046.543422923) - 1911.44s total +Experiment : ends at 2024-12-19 23:10:29.231723 (4331030.638857118) - 1055.94s total Experiment : downloading partial results Experiment : uploading full results Experiment : uploading workload file -Benchmarking connection ... [UPDATE].Return=OK -connection_pod ... -CockroachDB-64-8-65536-1-1 CockroachDB-64-8-65536-1 ... 500453 - -[1 rows x 43 columns] -Workflow {'CockroachDB-64-8-65536': [[1]]} -Loading connection ... [INSERT].Return=OK -connection_pod ... -CockroachDB-64-8-65536-1 CockroachDB-64-8-65536 ... 125000 -CockroachDB-64-8-65536-2 CockroachDB-64-8-65536 ... 125000 -CockroachDB-64-8-65536-3 CockroachDB-64-8-65536 ... 125000 -CockroachDB-64-8-65536-4 CockroachDB-64-8-65536 ... 125000 -CockroachDB-64-8-65536-5 CockroachDB-64-8-65536 ... 125000 -CockroachDB-64-8-65536-6 CockroachDB-64-8-65536 ... 125000 -CockroachDB-64-8-65536-7 CockroachDB-64-8-65536 ... 125000 -CockroachDB-64-8-65536-8 CockroachDB-64-8-65536 ... 125000 - -[8 rows x 36 columns] Result workflow complete ## Show Summary @@ -200,11 +132,11 @@ Result workflow complete ### Workload YCSB SF=1 Type: ycsb - Duration: 1912s - Code: 1730301195 - This includes no queries. YCSB runs the benchmark + Duration: 1056s + Code: 1734645173 + YCSB tool runs the benchmark. This experiment compares run time and resource consumption of YCSB queries. - Workload is 'A'. Number of rows to insert is 1000000. Number of operations is 1000000. Batch size is ''. + Workload is 'A'. Number of rows to insert is 1000000. Number of operations is 10000000. Batch size is ''. YCSB is performed using several threads and processes. Target is based on multiples of '16384'. Factors for loading are [4]. Factors for benchmarking are [4]. System metrics are monitored by a cluster-wide installation. Benchmark is limited to DBMS ['CockroachDB']. @@ -212,7 +144,6 @@ YCSB SF=1 Loading is fixed to cl-worker19. Benchmarking is fixed to cl-worker19. SUT is fixed to cl-worker11. - Database is persisted to disk of type shared and size 30Gi. Loading is tested with [64] threads, split into [8] pods. Benchmarking is tested with [64] threads, split into [1] pods. Benchmarking is run as [1] times the number of benchmarking pods. 
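The YCSB execution row above is internally consistent: [OVERALL].Throughput(ops/sec) equals the total number of successful operations divided by [OVERALL].RunTime. A quick check with the reported values:

    # Check: throughput = (reads + updates) / runtime, from the Execution row.
    ops = 5000094 + 4999906        # [READ].Return=OK + [UPDATE].Return=OK
    runtime_s = 708884 / 1000.0    # [OVERALL].RunTime(ms)
    print(round(ops / runtime_s, 2))  # 14106.68, matching the summary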
@@ -220,60 +151,48 @@ YCSB SF=1 ### Connections CockroachDB-64-8-65536-1 uses docker image cockroachdb/cockroach:v24.2.4 - RAM:541008605184 + RAM:541008576512 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254908644 + disk:249215592 requests_cpu:4 requests_memory:16Gi worker 0 - RAM:1081965535232 - CPU: - GPU: - GPUIDs:[] + RAM:1081966526464 Cores:256 - host:5.15.0-1060-nvidia - node:cl-worker27 - disk:726928680 - datadisk:107393516 - volume_size:30G - volume_used:1.5G - cuda: + host:5.15.0-1067-nvidia + node:cl-worker28 + disk:446476124 + datadisk:116276640 + volume_size:1000G + volume_used:109G worker 1 - RAM:1081750962176 - CPU: - GPU: - GPUIDs:[] + RAM:1081965555712 + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:634371096 + datadisk:116064760 + volume_size:1000G + volume_used:109G + worker 2 + RAM:1081751019520 Cores:128 - host:5.15.0-122-generic + host:5.15.0-126-generic node:cl-worker29 - disk:391582960 - datadisk:107344022 - volume_size:30G - volume_used:1.6G - cuda: - worker 2 - RAM:1081966493696 - CPU: - GPU: - GPUIDs:[] - Cores:256 - host:5.15.0-1060-nvidia - node:cl-worker28 - disk:676774188 - datadisk:107343598 - volume_size:30G - volume_used:1.6G - cuda: + disk:153231576 + datadisk:116065120 + volume_size:1000G + volume_used:109G ### Loading experiment_run threads target pod_count [OVERALL].Throughput(ops/sec) [OVERALL].RunTime(ms) [INSERT].Return=OK [INSERT].99thPercentileLatency(us) -CockroachDB-64-8-65536 1 64 65536 8 1185.98666 845744.0 1000000 255295.0 +CockroachDB-64-8-65536 1 64 65536 8 16211.343884 61937.0 1000000 7579.5 ### Execution experiment_run threads target pod_count [OVERALL].Throughput(ops/sec) [OVERALL].RunTime(ms) [READ].Return=OK [READ].99thPercentileLatency(us) [UPDATE].Return=OK [UPDATE].99thPercentileLatency(us) -CockroachDB-64-8-65536-1 1 64 65536 1 1337.57 747625.0 499547 71359.0 500453 1549311.0 +CockroachDB-64-8-65536-1 1 64 65536 1 14106.68 708884.0 5000094 5851.0 4999906 130879.0 ### Workflow @@ -285,19 +204,19 @@ DBMS CockroachDB-64-8-65536 - Pods [[1]] ### Ingestion - SUT CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] -CockroachDB-64-8-65536-1 2363.41 2.54 4.95 9.51 +CockroachDB-64-8-65536-1 888.04 0.03 3.05 5.76 ### Ingestion - Loader CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] -CockroachDB-64-8-65536-1 227.61 0.15 4.56 4.58 +CockroachDB-64-8-65536-1 103.7 0 4.34 4.37 ### Execution - SUT CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] -CockroachDB-64-8-65536-1 3054.28 2.11 7.28 12.92 +CockroachDB-64-8-65536-1 20657.12 20.74 12.51 26.94 ### Execution - Benchmarker CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] -CockroachDB-64-8-65536-1 138.39 0.21 0.58 0.58 +CockroachDB-64-8-65536-1 1024.62 1.56 0.6 0.61 ### Tests TEST passed: [OVERALL].Throughput(ops/sec) contains no 0 or NaN diff --git a/logs_tests/doc_ycsb_cockroachdb_1_summary.txt b/logs_tests/doc_ycsb_cockroachdb_1_summary.txt index 361e49094..c9f5499cc 100644 --- a/logs_tests/doc_ycsb_cockroachdb_1_summary.txt +++ b/logs_tests/doc_ycsb_cockroachdb_1_summary.txt @@ -3,11 +3,11 @@ ### Workload YCSB SF=1 Type: ycsb - Duration: 1912s - Code: 1730301195 - This includes no queries. YCSB runs the benchmark + Duration: 1056s + Code: 1734645173 + YCSB tool runs the benchmark. This experiment compares run time and resource consumption of YCSB queries. - Workload is 'A'. Number of rows to insert is 1000000. Number of operations is 1000000. Batch size is ''. + Workload is 'A'. Number of rows to insert is 1000000. 
Number of operations is 10000000. Batch size is ''. YCSB is performed using several threads and processes. Target is based on multiples of '16384'. Factors for loading are [4]. Factors for benchmarking are [4]. System metrics are monitored by a cluster-wide installation. Benchmark is limited to DBMS ['CockroachDB']. @@ -15,7 +15,6 @@ YCSB SF=1 Loading is fixed to cl-worker19. Benchmarking is fixed to cl-worker19. SUT is fixed to cl-worker11. - Database is persisted to disk of type shared and size 30Gi. Loading is tested with [64] threads, split into [8] pods. Benchmarking is tested with [64] threads, split into [1] pods. Benchmarking is run as [1] times the number of benchmarking pods. @@ -23,60 +22,48 @@ YCSB SF=1 ### Connections CockroachDB-64-8-65536-1 uses docker image cockroachdb/cockroach:v24.2.4 - RAM:541008605184 + RAM:541008576512 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254908644 + disk:249215592 requests_cpu:4 requests_memory:16Gi worker 0 - RAM:1081965535232 - CPU: - GPU: - GPUIDs:[] + RAM:1081966526464 Cores:256 - host:5.15.0-1060-nvidia - node:cl-worker27 - disk:726928680 - datadisk:107393516 - volume_size:30G - volume_used:1.5G - cuda: + host:5.15.0-1067-nvidia + node:cl-worker28 + disk:446476124 + datadisk:116276640 + volume_size:1000G + volume_used:109G worker 1 - RAM:1081750962176 - CPU: - GPU: - GPUIDs:[] + RAM:1081965555712 + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:634371096 + datadisk:116064760 + volume_size:1000G + volume_used:109G + worker 2 + RAM:1081751019520 Cores:128 - host:5.15.0-122-generic + host:5.15.0-126-generic node:cl-worker29 - disk:391582960 - datadisk:107344022 - volume_size:30G - volume_used:1.6G - cuda: - worker 2 - RAM:1081966493696 - CPU: - GPU: - GPUIDs:[] - Cores:256 - host:5.15.0-1060-nvidia - node:cl-worker28 - disk:676774188 - datadisk:107343598 - volume_size:30G - volume_used:1.6G - cuda: + disk:153231576 + datadisk:116065120 + volume_size:1000G + volume_used:109G ### Loading experiment_run threads target pod_count [OVERALL].Throughput(ops/sec) [OVERALL].RunTime(ms) [INSERT].Return=OK [INSERT].99thPercentileLatency(us) -CockroachDB-64-8-65536 1 64 65536 8 1185.98666 845744.0 1000000 255295.0 +CockroachDB-64-8-65536 1 64 65536 8 16211.343884 61937.0 1000000 7579.5 ### Execution experiment_run threads target pod_count [OVERALL].Throughput(ops/sec) [OVERALL].RunTime(ms) [READ].Return=OK [READ].99thPercentileLatency(us) [UPDATE].Return=OK [UPDATE].99thPercentileLatency(us) -CockroachDB-64-8-65536-1 1 64 65536 1 1337.57 747625.0 499547 71359.0 500453 1549311.0 +CockroachDB-64-8-65536-1 1 64 65536 1 14106.68 708884.0 5000094 5851.0 4999906 130879.0 ### Workflow @@ -88,19 +75,19 @@ DBMS CockroachDB-64-8-65536 - Pods [[1]] ### Ingestion - SUT CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] -CockroachDB-64-8-65536-1 2363.41 2.54 4.95 9.51 +CockroachDB-64-8-65536-1 888.04 0.03 3.05 5.76 ### Ingestion - Loader CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] -CockroachDB-64-8-65536-1 227.61 0.15 4.56 4.58 +CockroachDB-64-8-65536-1 103.7 0 4.34 4.37 ### Execution - SUT CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] -CockroachDB-64-8-65536-1 3054.28 2.11 7.28 12.92 +CockroachDB-64-8-65536-1 20657.12 20.74 12.51 26.94 ### Execution - Benchmarker CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] -CockroachDB-64-8-65536-1 138.39 0.21 0.58 0.58 +CockroachDB-64-8-65536-1 1024.62 1.56 0.6 0.61 ### Tests TEST passed: [OVERALL].Throughput(ops/sec) contains no 0 or NaN diff --git 
a/logs_tests/doc_ycsb_databaseservice_1.log b/logs_tests/doc_ycsb_databaseservice_1.log new file mode 100644 index 000000000..dfe50e1bf --- /dev/null +++ b/logs_tests/doc_ycsb_databaseservice_1.log @@ -0,0 +1,116 @@ +Data Directory : is running +Result Directory : is running +Dashboard : is running +Message Queue : is running +Experiment : has code 1734663459 +Experiment : starts at 2024-12-20 03:57:38.591399 (4348259.998533409) +Experiment : This experiment compares run time and resource consumption of YCSB queries. +Workload is 'A'. Number of rows to insert is 1000000. Number of operations is 1000000. Batch size is ''. +YCSB is performed using several threads and processes. Target is based on multiples of '16384'. Factors for loading are [4]. Factors for benchmarking are [4]. +Benchmark is limited to DBMS ['DatabaseService']. +Import is handled by 8 processes (pods). +Loading is fixed to cl-worker19. +Benchmarking is fixed to cl-worker19. +SUT is fixed to cl-worker11. +Loading is tested with [64] threads, split into [8] pods. +Benchmarking is tested with [64] threads, split into [1] pods. +Benchmarking is run as [1] times the number of benchmarking pods. +Experiment is run once. +Cluster monitoring : is running +done +DatabaseService-64-8-65536 : will start now +- waiting 30s - : done +DatabaseService-64-8-65536 : waits for health check to succeed +- waiting 30s - : done +DatabaseService-64-8-65536 : waits for health check to succeed +- waiting 30s - : done +DatabaseService-64-8-65536 : waits for health check to succeed +- waiting 30s - : done +DatabaseService-64-8-65536 : is not loaded yet +DatabaseService-64-8-65536 : will start loading but not before 2024-12-20 03:01:10 (that is in 60 secs) +- waiting 30s - : done +DatabaseService-64-8-65536 : is not loaded yet +DatabaseService-64-8-65536 : will start loading but not before 2024-12-20 03:01:10 +done +DatabaseService-64-8-65536 : is not loaded yet +DatabaseService-64-8-65536 : start asynch loading scripts of type loaded +DatabaseService-64-8-65536 : is loading +- waiting 30s - : done +DatabaseService-64-8-65536 : is loading +done +DatabaseService-64-8-65536 : showing loader times +DatabaseService-64-8-65536 : loader timespan (first to last [s]) = 21 +DatabaseService-64-8-65536 : benchmarks done 0 of 1. 
This will be client 1 +DatabaseService-64-8-65536 : we will change parameters of benchmark as {'PARALLEL': '1', 'SF': '1', 'BEXHOMA_SYNCH_LOAD': 1, 'YCSB_THREADCOUNT': 64, 'YCSB_TARGET': 65536, 'YCSB_STATUS': 1, 'YCSB_WORKLOAD': 'a', 'YCSB_ROWS': 1000000, 'YCSB_OPERATIONS': 1000000, 'YCSB_BATCHSIZE': ''} +DatabaseService-64-8-65536-1 : start benchmarking +Worker pods found: [] +DatabaseService-64-8-65536 : benchmarking results in folder /home/perdelt/benchmarks/1734663459 +- waiting 10s - : done +DatabaseService-64-8-65536 : benchmarking is waiting for job bexhoma-benchmarker-databaseservice-64-8-65536-1734663459-dhjg4: found +done +DatabaseService-64-8-65536 : has running benchmarks +DatabaseService-64-8-65536-1 : showing benchmarker times +DatabaseService-64-8-65536-1 : benchmarker timespan (start to end single container [s]) = 41 +DatabaseService-64-8-65536-1 : benchmarker times (start/end per pod and container) = [] +DatabaseService-64-8-65536-1 : found and updated times {'benchmarker': []} +done +DatabaseService-64-8-65536 : can be stopped +Experiment : ends at 2024-12-20 04:03:32.296496 (4348613.703629796) - 353.71s total +Experiment : downloading partial results +Experiment : uploading full results +Experiment : uploading workload file +Result workflow complete + +## Show Summary + +### Workload +YCSB SF=1 + Type: ycsb + Duration: 354s + Code: 1734663459 + YCSB tool runs the benchmark. + This experiment compares run time and resource consumption of YCSB queries. + Workload is 'A'. Number of rows to insert is 1000000. Number of operations is 1000000. Batch size is ''. + YCSB is performed using several threads and processes. Target is based on multiples of '16384'. Factors for loading are [4]. Factors for benchmarking are [4]. + Benchmark is limited to DBMS ['DatabaseService']. + Import is handled by 8 processes (pods). + Loading is fixed to cl-worker19. + Benchmarking is fixed to cl-worker19. + SUT is fixed to cl-worker11. + Loading is tested with [64] threads, split into [8] pods. + Benchmarking is tested with [64] threads, split into [1] pods. + Benchmarking is run as [1] times the number of benchmarking pods. + Experiment is run once. 
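The parameter dict logged above ("we will change parameters of benchmark as ...") is what distinguishes one client run from another. A hedged sketch of one plausible way such a dict reaches the benchmarker job, assuming it is injected as container environment variables; the mechanism is an assumption, only the keys and values come from the log:

```python
# Hypothetical sketch: turning the logged parameter dict into the env section
# of a Kubernetes Job container before the job template is applied.
params = {'PARALLEL': '1', 'SF': '1', 'BEXHOMA_SYNCH_LOAD': 1,
          'YCSB_THREADCOUNT': 64, 'YCSB_TARGET': 65536, 'YCSB_STATUS': 1,
          'YCSB_WORKLOAD': 'a', 'YCSB_ROWS': 1000000,
          'YCSB_OPERATIONS': 1000000, 'YCSB_BATCHSIZE': ''}

# Kubernetes env entries require string values.
env = [{'name': key, 'value': str(value)} for key, value in params.items()]
# env would then be patched into spec.template.spec.containers[0].env
print(env[:3])
```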
+ +### Connections +DatabaseService-64-8-65536-1 uses docker image postgres:16.1 + RAM:541008576512 + CPU:AMD Opteron(tm) Processor 6378 + Cores:64 + host:5.15.0-126-generic + node:cl-worker11 + disk:249256012 + datadisk:39348 + requests_cpu:4 + requests_memory:16Gi + +### Loading + experiment_run threads target pod_count [OVERALL].Throughput(ops/sec) [OVERALL].RunTime(ms) [INSERT].Return=OK [INSERT].99thPercentileLatency(us) +DatabaseService-64-8-65536 1 64 65536 8 49973.150502 20251.0 1000000 25397.0 + +### Execution + experiment_run threads target pod_count [OVERALL].Throughput(ops/sec) [OVERALL].RunTime(ms) [READ].Return=OK [READ].99thPercentileLatency(us) [UPDATE].Return=OK [UPDATE].99thPercentileLatency(us) +DatabaseService-64-8-65536-1 1 64 65536 1 55202.87 18115.0 499487 2095.0 500513 42239.0 + +### Workflow + +#### Actual +DBMS DatabaseService-64-8-65536 - Pods [[1]] + +#### Planned +DBMS DatabaseService-64-8-65536 - Pods [[1]] + +### Tests +TEST passed: [OVERALL].Throughput(ops/sec) contains no 0 or NaN +TEST passed: [OVERALL].Throughput(ops/sec) contains no 0 or NaN +TEST passed: Workflow as planned diff --git a/logs_tests/doc_ycsb_databaseservice_1_summary.txt b/logs_tests/doc_ycsb_databaseservice_1_summary.txt new file mode 100644 index 000000000..4f55a81b2 --- /dev/null +++ b/logs_tests/doc_ycsb_databaseservice_1_summary.txt @@ -0,0 +1,53 @@ +## Show Summary + +### Workload +YCSB SF=1 + Type: ycsb + Duration: 354s + Code: 1734663459 + YCSB tool runs the benchmark. + This experiment compares run time and resource consumption of YCSB queries. + Workload is 'A'. Number of rows to insert is 1000000. Number of operations is 1000000. Batch size is ''. + YCSB is performed using several threads and processes. Target is based on multiples of '16384'. Factors for loading are [4]. Factors for benchmarking are [4]. + Benchmark is limited to DBMS ['DatabaseService']. + Import is handled by 8 processes (pods). + Loading is fixed to cl-worker19. + Benchmarking is fixed to cl-worker19. + SUT is fixed to cl-worker11. + Loading is tested with [64] threads, split into [8] pods. + Benchmarking is tested with [64] threads, split into [1] pods. + Benchmarking is run as [1] times the number of benchmarking pods. + Experiment is run once. 
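The TEST lines in these logs reduce to a simple sanity predicate over the result tables. A minimal sketch, assuming the summary table is available as a pandas DataFrame with the column name shown in the log:

```python
import pandas as pd

# "contains no 0 or NaN": every value in the column must be present and non-zero.
def contains_no_zero_or_nan(df: pd.DataFrame, column: str) -> bool:
    col = df[column]
    return bool(col.notna().all() and (col != 0).all())

# Illustrative data taken from the Loading/Execution tables above.
df = pd.DataFrame({'[OVERALL].Throughput(ops/sec)': [49973.150502, 55202.87]})
if contains_no_zero_or_nan(df, '[OVERALL].Throughput(ops/sec)'):
    print("TEST passed: [OVERALL].Throughput(ops/sec) contains no 0 or NaN")
```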
+ +### Connections +DatabaseService-64-8-65536-1 uses docker image postgres:16.1 + RAM:541008576512 + CPU:AMD Opteron(tm) Processor 6378 + Cores:64 + host:5.15.0-126-generic + node:cl-worker11 + disk:249256012 + datadisk:39348 + requests_cpu:4 + requests_memory:16Gi + +### Loading + experiment_run threads target pod_count [OVERALL].Throughput(ops/sec) [OVERALL].RunTime(ms) [INSERT].Return=OK [INSERT].99thPercentileLatency(us) +DatabaseService-64-8-65536 1 64 65536 8 49973.150502 20251.0 1000000 25397.0 + +### Execution + experiment_run threads target pod_count [OVERALL].Throughput(ops/sec) [OVERALL].RunTime(ms) [READ].Return=OK [READ].99thPercentileLatency(us) [UPDATE].Return=OK [UPDATE].99thPercentileLatency(us) +DatabaseService-64-8-65536-1 1 64 65536 1 55202.87 18115.0 499487 2095.0 500513 42239.0 + +### Workflow + +#### Actual +DBMS DatabaseService-64-8-65536 - Pods [[1]] + +#### Planned +DBMS DatabaseService-64-8-65536 - Pods [[1]] + +### Tests +TEST passed: [OVERALL].Throughput(ops/sec) contains no 0 or NaN +TEST passed: [OVERALL].Throughput(ops/sec) contains no 0 or NaN +TEST passed: Workflow as planned diff --git a/logs_tests/doc_ycsb_databaseservice_2.log b/logs_tests/doc_ycsb_databaseservice_2.log new file mode 100644 index 000000000..64ce17075 --- /dev/null +++ b/logs_tests/doc_ycsb_databaseservice_2.log @@ -0,0 +1,114 @@ +Data Directory : is running +Result Directory : is running +Dashboard : is running +Message Queue : is running +Experiment : has code 1734663819 +Experiment : starts at 2024-12-20 04:03:38.820550 (4348620.227677921) +Experiment : This experiment compares run time and resource consumption of YCSB queries. +Workload is 'A'. Number of rows to insert is 1000000. Number of operations is 10000000. Batch size is ''. +YCSB is performed using several threads and processes. Target is based on multiples of '16384'. Factors for loading are [4]. Factors for benchmarking are [4]. +System metrics are monitored by a cluster-wide installation. +Benchmark is limited to DBMS ['DatabaseService']. +Import is handled by 8 processes (pods). +Loading is fixed to cl-worker19. +Benchmarking is fixed to cl-worker19. +SUT is fixed to cl-worker11. +Loading is skipped. +Loading is tested with [64] threads, split into [8] pods. +Benchmarking is tested with [64] threads, split into [1] pods. +Benchmarking is run as [1] times the number of benchmarking pods. +Experiment is run once. +Cluster monitoring : is running +done +DatabaseService-64-8-65536 : will start now +- waiting 30s - : done +DatabaseService-64-8-65536 : will start benchmarking but not before 2024-12-20 03:05:39 (that is in 60 secs) +- waiting 30s - : done +DatabaseService-64-8-65536 : will start benchmarking but not before 2024-12-20 03:05:39 +done +DatabaseService-64-8-65536 : benchmarks done 0 of 1. 
This will be client 1 +DatabaseService-64-8-65536 : we will change parameters of benchmark as {'PARALLEL': '1', 'SF': '1', 'BEXHOMA_SYNCH_LOAD': 1, 'YCSB_THREADCOUNT': 64, 'YCSB_TARGET': 65536, 'YCSB_STATUS': 1, 'YCSB_WORKLOAD': 'a', 'YCSB_ROWS': 1000000, 'YCSB_OPERATIONS': 10000000, 'YCSB_BATCHSIZE': ''} +DatabaseService-64-8-65536-1 : start benchmarking +Worker pods found: [] +DatabaseService-64-8-65536 : benchmarking results in folder /home/perdelt/benchmarks/1734663819 +- waiting 10s - : done +DatabaseService-64-8-65536 : benchmarking is waiting for job bexhoma-benchmarker-databaseservice-64-8-65536-1734663819-bxk2f: found +- waiting 30s - : done +DatabaseService-64-8-65536 : has running benchmarks +- waiting 30s - : done +DatabaseService-64-8-65536 : has running benchmarks +- waiting 30s - : done +DatabaseService-64-8-65536 : has running benchmarks +- waiting 30s - : done +DatabaseService-64-8-65536 : has running benchmarks +done +DatabaseService-64-8-65536 : has running benchmarks +DatabaseService-64-8-65536-1 : showing benchmarker times +DatabaseService-64-8-65536-1 : benchmarker timespan (start to end single container [s]) = 162 +DatabaseService-64-8-65536-1 : benchmarker times (start/end per pod and container) = [] +DatabaseService-64-8-65536-1 : found and updated times {'benchmarker': []} +DatabaseService-64-8-65536-1 : collecting metrics of benchmarker +DatabaseService-64-8-65536-1 : example metric {'query': '(sum(max(container_memory_working_set_bytes{pod=~"(.*)databaseservice-64-8-65536-1734663819(.*)", container="dbmsbenchmarker"}) by (pod)))/1024/1024', 'title': 'CPU Memory [MiB]'} +done +DatabaseService-64-8-65536 : can be stopped +Experiment : ends at 2024-12-20 04:08:59.884356 (4348941.291489666) - 321.06s total +Experiment : downloading partial results +Experiment : uploading full results +Experiment : uploading workload file +Result workflow complete + +## Show Summary + +### Workload +YCSB SF=1 + Type: ycsb + Duration: 322s + Code: 1734663819 + YCSB tool runs the benchmark. + This experiment compares run time and resource consumption of YCSB queries. + Workload is 'A'. Number of rows to insert is 1000000. Number of operations is 10000000. Batch size is ''. + YCSB is performed using several threads and processes. Target is based on multiples of '16384'. Factors for loading are [4]. Factors for benchmarking are [4]. + System metrics are monitored by a cluster-wide installation. + Benchmark is limited to DBMS ['DatabaseService']. + Import is handled by 8 processes (pods). + Loading is fixed to cl-worker19. + Benchmarking is fixed to cl-worker19. + SUT is fixed to cl-worker11. + Loading is skipped. + Loading is tested with [64] threads, split into [8] pods. + Benchmarking is tested with [64] threads, split into [1] pods. + Benchmarking is run as [1] times the number of benchmarking pods. + Experiment is run once. 
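The example metric logged above is the product of a template in which configuration and experiment are placeholders. A small sketch that reproduces the logged query by plain string substitution; the template layout is inferred from the logged result:

```python
# Placeholders {configuration} and {experiment} are substituted per run;
# doubled braces are literal braces in the promql expression.
template = ('(sum(max(container_memory_working_set_bytes{{'
            'pod=~"(.*){configuration}-{experiment}(.*)", '
            'container="dbmsbenchmarker"}}) by (pod)))/1024/1024')

query = template.format(configuration='databaseservice-64-8-65536',
                        experiment='1734663819')
print({'query': query, 'title': 'CPU Memory [MiB]'})
```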
+ +### Connections +DatabaseService-64-8-65536-1 uses docker image postgres:16.1 + RAM:541008576512 + CPU:AMD Opteron(tm) Processor 6378 + Cores:64 + host:5.15.0-126-generic + node:cl-worker11 + disk:249256004 + datadisk:39348 + requests_cpu:4 + requests_memory:16Gi + +### Execution + experiment_run threads target pod_count [OVERALL].Throughput(ops/sec) [OVERALL].RunTime(ms) [READ].Return=OK [READ].99thPercentileLatency(us) [UPDATE].Return=OK [UPDATE].99thPercentileLatency(us) +DatabaseService-64-8-65536-1 1 64 65536 1 64975.99 153903.0 4999780 2423.0 5000220 30719.0 + +### Workflow + +#### Actual +DBMS DatabaseService-64-8-65536 - Pods [[1]] + +#### Planned +DBMS DatabaseService-64-8-65536 - Pods [[1]] + +### Execution - Benchmarker + CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] +DatabaseService-64-8-65536-1 745.84 6.07 0.6 0.61 + +### Tests +TEST passed: [OVERALL].Throughput(ops/sec) contains no 0 or NaN +TEST passed: Execution Benchmarker contains no 0 or NaN in CPU [CPUs] +TEST passed: Workflow as planned diff --git a/logs_tests/doc_ycsb_databaseservice_2_summary.txt b/logs_tests/doc_ycsb_databaseservice_2_summary.txt new file mode 100644 index 000000000..ef47b3001 --- /dev/null +++ b/logs_tests/doc_ycsb_databaseservice_2_summary.txt @@ -0,0 +1,55 @@ +## Show Summary + +### Workload +YCSB SF=1 + Type: ycsb + Duration: 322s + Code: 1734663819 + YCSB tool runs the benchmark. + This experiment compares run time and resource consumption of YCSB queries. + Workload is 'A'. Number of rows to insert is 1000000. Number of operations is 10000000. Batch size is ''. + YCSB is performed using several threads and processes. Target is based on multiples of '16384'. Factors for loading are [4]. Factors for benchmarking are [4]. + System metrics are monitored by a cluster-wide installation. + Benchmark is limited to DBMS ['DatabaseService']. + Import is handled by 8 processes (pods). + Loading is fixed to cl-worker19. + Benchmarking is fixed to cl-worker19. + SUT is fixed to cl-worker11. + Loading is skipped. + Loading is tested with [64] threads, split into [8] pods. + Benchmarking is tested with [64] threads, split into [1] pods. + Benchmarking is run as [1] times the number of benchmarking pods. + Experiment is run once. 
+ +### Connections +DatabaseService-64-8-65536-1 uses docker image postgres:16.1 + RAM:541008576512 + CPU:AMD Opteron(tm) Processor 6378 + Cores:64 + host:5.15.0-126-generic + node:cl-worker11 + disk:249256004 + datadisk:39348 + requests_cpu:4 + requests_memory:16Gi + +### Execution + experiment_run threads target pod_count [OVERALL].Throughput(ops/sec) [OVERALL].RunTime(ms) [READ].Return=OK [READ].99thPercentileLatency(us) [UPDATE].Return=OK [UPDATE].99thPercentileLatency(us) +DatabaseService-64-8-65536-1 1 64 65536 1 64975.99 153903.0 4999780 2423.0 5000220 30719.0 + +### Workflow + +#### Actual +DBMS DatabaseService-64-8-65536 - Pods [[1]] + +#### Planned +DBMS DatabaseService-64-8-65536 - Pods [[1]] + +### Execution - Benchmarker + CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] +DatabaseService-64-8-65536-1 745.84 6.07 0.6 0.61 + +### Tests +TEST passed: [OVERALL].Throughput(ops/sec) contains no 0 or NaN +TEST passed: Execution Benchmarker contains no 0 or NaN in CPU [CPUs] +TEST passed: Workflow as planned diff --git a/logs_tests/doc_ycsb_databaseservice_3.log b/logs_tests/doc_ycsb_databaseservice_3.log new file mode 100644 index 000000000..81284bd0b --- /dev/null +++ b/logs_tests/doc_ycsb_databaseservice_3.log @@ -0,0 +1,360 @@ +Data Directory : is running +Result Directory : is running +Dashboard : is running +Message Queue : is running +Experiment : has code 1734700853 +Experiment : starts at 2024-12-20 14:20:53.019251 (4385654.426380379) +Experiment : This experiment compares run time and resource consumption of YCSB queries. +Workload is 'A'. Number of rows to insert is 5000000. Number of operations is 10000000. Batch size is ''. +YCSB is performed using several threads and processes. Target is based on multiples of '16384'. Factors for loading are [4]. Factors for benchmarking are [4]. +System metrics are monitored by a cluster-wide installation. +Benchmark is limited to DBMS ['DatabaseService']. +Import is handled by 8 processes (pods). +Loading is fixed to cl-worker19. +Benchmarking is fixed to cl-worker19. +SUT is fixed to cl-worker11. +Database is persisted to disk of type shared and size 1Gi. +Loading is tested with [64] threads, split into [8] pods. +Benchmarking is tested with [64] threads, split into [1] pods. +Benchmarking is run as [1] times the number of benchmarking pods. +Experiment is run once. 
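The long run of waiting that follows is the experiment being held back by a cluster-wide cap on concurrently active experiment pods. A minimal sketch of the admission check behind the repeated "has to wait" lines, assuming the counts of running and pending experiment pods are known:

```python
# The experiment may only start while the cluster-wide pod budget has room.
MAX_PODS = 2

def may_start(running: int, pending: int) -> bool:
    return running + pending < MAX_PODS

running, pending = 2, 0
if not may_start(running, pending):
    print(f"has to wait - {running} running and {pending} pending pods: "
          f"max is {MAX_PODS} pods per cluster")
```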
+Cluster monitoring : is running +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done 
+DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 
pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait 
- 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +- waiting 30s - : done +DatabaseService-64-8-65536 : has to wait - 2 running and 0 pending pods: max is 2 pods per cluster +done +DatabaseService-64-8-65536 : will start now +- waiting 30s - : done +DatabaseService-64-8-65536 : waits for health check to succeed +- waiting 30s - : done +DatabaseService-64-8-65536 : waits for health check to succeed +- waiting 30s - : done +DatabaseService-64-8-65536 : waits for health check to succeed +- waiting 30s - : done +DatabaseService-64-8-65536 : waits for health check to succeed +- waiting 30s - : done +DatabaseService-64-8-65536 : is not loaded yet +DatabaseService-64-8-65536 : will start loading but not before 2024-12-20 14:16:07 (that is in 60 secs) +- waiting 30s - : done +DatabaseService-64-8-65536 : is not loaded yet +DatabaseService-64-8-65536 : will start loading but not before 2024-12-20 14:16:07 +done +DatabaseService-64-8-65536 : is not loaded yet +DatabaseService-64-8-65536 : start asynch loading scripts of type loaded +DatabaseService-64-8-65536 : is loading +- waiting 30s - : done +DatabaseService-64-8-65536 : is loading +- waiting 30s - : done +DatabaseService-64-8-65536 : is loading +- waiting 30s - : done +DatabaseService-64-8-65536 : is loading +- waiting 30s - : done +DatabaseService-64-8-65536 : is loading +- waiting 30s - : done +DatabaseService-64-8-65536 : is loading +done +DatabaseService-64-8-65536 : showing loader times +DatabaseService-64-8-65536 : loader timespan (first to last [s]) = 145 +DatabaseService-64-8-65536 : benchmarks done 0 of 1. 
This will be client 1 +DatabaseService-64-8-65536 : we will change parameters of benchmark as {'PARALLEL': '1', 'SF': '5', 'BEXHOMA_SYNCH_LOAD': 1, 'YCSB_THREADCOUNT': 64, 'YCSB_TARGET': 65536, 'YCSB_STATUS': 1, 'YCSB_WORKLOAD': 'a', 'YCSB_ROWS': 5000000, 'YCSB_OPERATIONS': 10000000, 'YCSB_BATCHSIZE': ''} +DatabaseService-64-8-65536-1 : start benchmarking +Worker pods found: [] +DatabaseService-64-8-65536 : benchmarking results in folder /home/perdelt/benchmarks/1734700853 +- waiting 10s - : done +found +DatabaseService-64-8-65536-1 : collecting metrics of data generator +DatabaseService-64-8-65536-1 : example metric {'query': '(sum(max(container_memory_working_set_bytes{pod=~"(.*)databaseservice-64-8-65536-1734700853(.*)", container="datagenerator"}) by (pod)))/1024/1024', 'title': 'CPU Memory [MiB]'} +DatabaseService-64-8-65536-1 : collecting metrics of data injector +DatabaseService-64-8-65536-1 : example metric {'query': '(sum(max(container_memory_working_set_bytes{pod=~"(.*)databaseservice-64-8-65536-1734700853(.*)", container="sensor"}) by (pod)))/1024/1024', 'title': 'CPU Memory [MiB]'} +- waiting 30s - : done +DatabaseService-64-8-65536 : has running benchmarks +- waiting 30s - : done +DatabaseService-64-8-65536 : has running benchmarks +- waiting 30s - : done +DatabaseService-64-8-65536 : has running benchmarks +- waiting 30s - : done +DatabaseService-64-8-65536 : has running benchmarks +done +DatabaseService-64-8-65536 : has running benchmarks +DatabaseService-64-8-65536-1 : showing benchmarker times +DatabaseService-64-8-65536-1 : benchmarker timespan (start to end single container [s]) = 171 +DatabaseService-64-8-65536-1 : benchmarker times (start/end per pod and container) = [] +DatabaseService-64-8-65536-1 : found and updated times {'benchmarker': []} +DatabaseService-64-8-65536-1 : collecting metrics of benchmarker +DatabaseService-64-8-65536-1 : example metric {'query': '(sum(max(container_memory_working_set_bytes{pod=~"(.*)databaseservice-64-8-65536-1734700853(.*)", container="dbmsbenchmarker"}) by (pod)))/1024/1024', 'title': 'CPU Memory [MiB]'} +done +DatabaseService-64-8-65536 : can be stopped +Experiment : ends at 2024-12-20 15:22:50.227514 (4389371.634647944) - 3717.21s total +Experiment : downloading partial results +Experiment : uploading full results +Experiment : uploading workload file +Result workflow complete + +## Show Summary + +### Workload +YCSB SF=5 + Type: ycsb + Duration: 3718s + Code: 1734700853 + YCSB tool runs the benchmark. + This experiment compares run time and resource consumption of YCSB queries. + Workload is 'A'. Number of rows to insert is 5000000. Number of operations is 10000000. Batch size is ''. + YCSB is performed using several threads and processes. Target is based on multiples of '16384'. Factors for loading are [4]. Factors for benchmarking are [4]. + System metrics are monitored by a cluster-wide installation. + Benchmark is limited to DBMS ['DatabaseService']. + Import is handled by 8 processes (pods). + Loading is fixed to cl-worker19. + Benchmarking is fixed to cl-worker19. + SUT is fixed to cl-worker11. + Database is persisted to disk of type shared and size 1Gi. + Loading is tested with [64] threads, split into [8] pods. + Benchmarking is tested with [64] threads, split into [1] pods. + Benchmarking is run as [1] times the number of benchmarking pods. + Experiment is run once. 
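The loader timespan reported above is measured from the first loader pod to start to the last one to finish, across all 8 pods. A sketch with illustrative epoch timestamps; only the first-to-last rule comes from the log:

```python
# (start, end) per loader pod; values are illustrative, chosen to yield 145 s.
pod_times = [(1734700960, 1734701100), (1734700962, 1734701105)]

timespan = (max(end for _, end in pod_times)
            - min(start for start, _ in pod_times))
print(f"loader timespan (first to last [s]) = {timespan}")
```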
+ +### Connections +DatabaseService-64-8-65536-1 uses docker image postgres:16.1 + RAM:541008576512 + CPU:AMD Opteron(tm) Processor 6378 + Cores:64 + host:5.15.0-126-generic + node:cl-worker11 + disk:250060572 + datadisk:39192 + volume_size:1.0G + volume_used:36M + requests_cpu:4 + requests_memory:16Gi + +### Loading + experiment_run threads target pod_count [OVERALL].Throughput(ops/sec) [OVERALL].RunTime(ms) [INSERT].Return=OK [INSERT].99thPercentileLatency(us) +DatabaseService-64-8-65536 1 64 65536 8 34656.857878 145263.0 5000000 6815.5 + +### Execution + experiment_run threads target pod_count [OVERALL].Throughput(ops/sec) [OVERALL].RunTime(ms) [READ].Return=OK [READ].99thPercentileLatency(us) [UPDATE].Return=OK [UPDATE].99thPercentileLatency(us) +DatabaseService-64-8-65536-1 1 64 65536 1 65353.5 153014.0 4997965 653.0 5002035 1296.0 + +### Workflow + +#### Actual +DBMS DatabaseService-64-8-65536 - Pods [[1]] + +#### Planned +DBMS DatabaseService-64-8-65536 - Pods [[1]] + +### Ingestion - Loader + CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] +DatabaseService-64-8-65536-1 397.12 0.88 4.6 4.62 + +### Execution - Benchmarker + CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] +DatabaseService-64-8-65536-1 743.75 5.11 0.6 0.61 + +### Tests +TEST passed: [OVERALL].Throughput(ops/sec) contains no 0 or NaN +TEST passed: [OVERALL].Throughput(ops/sec) contains no 0 or NaN +TEST passed: Ingestion Loader contains no 0 or NaN in CPU [CPUs] +TEST passed: Execution Benchmarker contains no 0 or NaN in CPU [CPUs] +TEST passed: Workflow as planned diff --git a/logs_tests/doc_ycsb_databaseservice_3_summary.txt b/logs_tests/doc_ycsb_databaseservice_3_summary.txt new file mode 100644 index 000000000..62da3987a --- /dev/null +++ b/logs_tests/doc_ycsb_databaseservice_3_summary.txt @@ -0,0 +1,67 @@ +## Show Summary + +### Workload +YCSB SF=5 + Type: ycsb + Duration: 3718s + Code: 1734700853 + YCSB tool runs the benchmark. + This experiment compares run time and resource consumption of YCSB queries. + Workload is 'A'. Number of rows to insert is 5000000. Number of operations is 10000000. Batch size is ''. + YCSB is performed using several threads and processes. Target is based on multiples of '16384'. Factors for loading are [4]. Factors for benchmarking are [4]. + System metrics are monitored by a cluster-wide installation. + Benchmark is limited to DBMS ['DatabaseService']. + Import is handled by 8 processes (pods). + Loading is fixed to cl-worker19. + Benchmarking is fixed to cl-worker19. + SUT is fixed to cl-worker11. + Database is persisted to disk of type shared and size 1Gi. + Loading is tested with [64] threads, split into [8] pods. + Benchmarking is tested with [64] threads, split into [1] pods. + Benchmarking is run as [1] times the number of benchmarking pods. + Experiment is run once. 
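The volume_size and volume_used fields in the connection info suggest a filesystem-level probe of the data volume. A hypothetical sketch, assuming a df -h run inside the SUT pod; the pod name and mount path here are invented for illustration and do not appear in the log:

```python
import subprocess

# Read size/used of the data volume from inside the pod (names are assumptions).
out = subprocess.check_output(
    ['kubectl', 'exec', 'bexhoma-sut-pod', '--',
     'df', '-h', '/var/lib/postgresql/data'], text=True)

# Last line of df output: "<device> <size> <used> <avail> <use%> <mount>"
device, size, used, *_ = out.splitlines()[-1].split()
print(f"volume_size:{size}")
print(f"volume_used:{used}")
```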
+ +### Connections +DatabaseService-64-8-65536-1 uses docker image postgres:16.1 + RAM:541008576512 + CPU:AMD Opteron(tm) Processor 6378 + Cores:64 + host:5.15.0-126-generic + node:cl-worker11 + disk:250060572 + datadisk:39192 + volume_size:1.0G + volume_used:36M + requests_cpu:4 + requests_memory:16Gi + +### Loading + experiment_run threads target pod_count [OVERALL].Throughput(ops/sec) [OVERALL].RunTime(ms) [INSERT].Return=OK [INSERT].99thPercentileLatency(us) +DatabaseService-64-8-65536 1 64 65536 8 34656.857878 145263.0 5000000 6815.5 + +### Execution + experiment_run threads target pod_count [OVERALL].Throughput(ops/sec) [OVERALL].RunTime(ms) [READ].Return=OK [READ].99thPercentileLatency(us) [UPDATE].Return=OK [UPDATE].99thPercentileLatency(us) +DatabaseService-64-8-65536-1 1 64 65536 1 65353.5 153014.0 4997965 653.0 5002035 1296.0 + +### Workflow + +#### Actual +DBMS DatabaseService-64-8-65536 - Pods [[1]] + +#### Planned +DBMS DatabaseService-64-8-65536 - Pods [[1]] + +### Ingestion - Loader + CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] +DatabaseService-64-8-65536-1 397.12 0.88 4.6 4.62 + +### Execution - Benchmarker + CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] +DatabaseService-64-8-65536-1 743.75 5.11 0.6 0.61 + +### Tests +TEST passed: [OVERALL].Throughput(ops/sec) contains no 0 or NaN +TEST passed: [OVERALL].Throughput(ops/sec) contains no 0 or NaN +TEST passed: Ingestion Loader contains no 0 or NaN in CPU [CPUs] +TEST passed: Execution Benchmarker contains no 0 or NaN in CPU [CPUs] +TEST passed: Workflow as planned diff --git a/logs_tests/doc_ycsb_yugabytedb_1.log b/logs_tests/doc_ycsb_yugabytedb_1.log index 4cec0a7bc..ed7835bd9 100644 --- a/logs_tests/doc_ycsb_yugabytedb_1.log +++ b/logs_tests/doc_ycsb_yugabytedb_1.log @@ -2,8 +2,8 @@ Data Directory : is running Result Directory : is running Dashboard : is running Message Queue : is running -Experiment : has code 1730222076 -Experiment : starts at 2024-10-29 18:14:36.469219 (4958016.593197697) +Experiment : has code 1734625544 +Experiment : starts at 2024-12-19 17:25:44.048070 (4310345.455200298) Experiment : This experiment compares run time and resource consumption of YCSB queries. Workload is 'A'. Number of rows to insert is 1000000. Number of operations is 10000000. Batch size is ''. YCSB is performed using several threads and processes. Target is based on multiples of '16384'. Factors for loading are [4]. Factors for benchmarking are [4]. @@ -22,42 +22,34 @@ done YugabyteDB-64-8-65536 : will start now - waiting 30s - : done YugabyteDB-64-8-65536 : is not loaded yet -YugabyteDB-64-8-65536 : will start loading but not before 2024-10-29 17:16:37 (that is in 60 secs) +YugabyteDB-64-8-65536 : will start loading but not before 2024-12-19 16:27:44 (that is in 60 secs) - waiting 30s - : done YugabyteDB-64-8-65536 : is not loaded yet -YugabyteDB-64-8-65536 : will start loading but not before 2024-10-29 17:16:37 +YugabyteDB-64-8-65536 : will start loading but not before 2024-12-19 16:27:44 done YugabyteDB-64-8-65536 : is not loaded yet YugabyteDB-64-8-65536 : start asynch loading scripts of type loaded YugabyteDB-64-8-65536 : is loading - waiting 30s - : done YugabyteDB-64-8-65536 : is loading -- waiting 30s - : done -YugabyteDB-64-8-65536 : is loading done YugabyteDB-64-8-65536 : showing loader times -YugabyteDB-64-8-65536 : loader timespan (first to last [s]) = 61 +YugabyteDB-64-8-65536 : loader timespan (first to last [s]) = 36 YugabyteDB-64-8-65536 : benchmarks done 0 of 1. 
This will be client 1 YugabyteDB-64-8-65536 : we will change parameters of benchmark as {'PARALLEL': '1', 'SF': '1', 'BEXHOMA_SYNCH_LOAD': 1, 'YCSB_THREADCOUNT': 64, 'YCSB_TARGET': 65536, 'YCSB_STATUS': 1, 'YCSB_WORKLOAD': 'a', 'YCSB_ROWS': 1000000, 'YCSB_OPERATIONS': 10000000, 'YCSB_BATCHSIZE': ''} YugabyteDB-64-8-65536-1 : start benchmarking -YugabyteDB-64-8-65536 : benchmarking results in folder /home/perdelt/benchmarks/1730222076 +YugabyteDB-64-8-65536 : distributed system - get host info for worker yb-tserver-0 +YugabyteDB-64-8-65536 : distributed system - get host info for worker yb-tserver-1 +YugabyteDB-64-8-65536 : distributed system - get host info for worker yb-tserver-2 +YugabyteDB-64-8-65536 : benchmarking results in folder /home/perdelt/benchmarks/1734625544 - waiting 10s - : done found YugabyteDB-64-8-65536-1 : collecting loading metrics of SUT +YugabyteDB-64-8-65536-1 : example metric {'query': '(sum(max(container_memory_working_set_bytes{pod=~"(.*)yb-tserver-(.*)"}) by (pod)))/1024/1024', 'title': 'CPU Memory [MiB]'} YugabyteDB-64-8-65536-1 : collecting metrics of data generator +YugabyteDB-64-8-65536-1 : example metric {'query': '(sum(max(container_memory_working_set_bytes{pod=~"(.*)yugabytedb-64-8-65536-1734625544(.*)", container="datagenerator"}) by (pod)))/1024/1024', 'title': 'CPU Memory [MiB]'} YugabyteDB-64-8-65536-1 : collecting metrics of data injector -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks +YugabyteDB-64-8-65536-1 : example metric {'query': '(sum(max(container_memory_working_set_bytes{pod=~"(.*)yugabytedb-64-8-65536-1734625544(.*)", container="sensor"}) by (pod)))/1024/1024', 'title': 'CPU Memory [MiB]'} - waiting 30s - : done YugabyteDB-64-8-65536 : has running benchmarks - waiting 30s - : done @@ -79,35 +71,19 @@ YugabyteDB-64-8-65536 : has running benchmarks done YugabyteDB-64-8-65536 : has running benchmarks YugabyteDB-64-8-65536-1 : showing benchmarker times -YugabyteDB-64-8-65536-1 : benchmarker timespan (start to end single container [s]) = 508 -YugabyteDB-64-8-65536-1 : benchmarker times (start/end per pod and container) = [(1730222299, 1730222798)] -YugabyteDB-64-8-65536-1 : found and updated times {'benchmarker': [(1730222299, 1730222798)]} +YugabyteDB-64-8-65536-1 : benchmarker timespan (start to end single container [s]) = 327 +YugabyteDB-64-8-65536-1 : benchmarker times (start/end per pod and container) = [(1734625740, 1734626054)] +YugabyteDB-64-8-65536-1 : found and updated times {'benchmarker': [(1734625740, 1734626054)]} YugabyteDB-64-8-65536-1 : collecting execution metrics of SUT +YugabyteDB-64-8-65536-1 : example metric {'query': '(sum(max(container_memory_working_set_bytes{pod=~"(.*)yb-tserver-(.*)"}) by (pod)))/1024/1024', 'title': 'CPU Memory [MiB]'} YugabyteDB-64-8-65536-1 : collecting metrics of benchmarker +YugabyteDB-64-8-65536-1 : example metric {'query': '(sum(max(container_memory_working_set_bytes{pod=~"(.*)yugabytedb-64-8-65536-1734625544(.*)", container="dbmsbenchmarker"}) by (pod)))/1024/1024', 'title': 'CPU Memory [MiB]'} done YugabyteDB-64-8-65536 : can be stopped -Experiment : ends at 2024-10-29 18:27:28.674626 (4958788.798604877) - 772.21s 
total +Experiment : ends at 2024-12-19 17:35:05.156119 (4310906.563252344) - 561.11s total Experiment : downloading partial results Experiment : uploading full results Experiment : uploading workload file -Benchmarking connection ... [UPDATE].Return=OK -connection_pod ... -YugabyteDB-64-8-65536-1-1 YugabyteDB-64-8-65536-1 ... 4998400 - -[1 rows x 43 columns] -Workflow {'YugabyteDB-64-8-65536': [[1]]} -Loading connection ... [INSERT].Return=OK -connection_pod ... -YugabyteDB-64-8-65536-1 YugabyteDB-64-8-65536 ... 125000 -YugabyteDB-64-8-65536-2 YugabyteDB-64-8-65536 ... 125000 -YugabyteDB-64-8-65536-3 YugabyteDB-64-8-65536 ... 125000 -YugabyteDB-64-8-65536-4 YugabyteDB-64-8-65536 ... 125000 -YugabyteDB-64-8-65536-5 YugabyteDB-64-8-65536 ... 125000 -YugabyteDB-64-8-65536-6 YugabyteDB-64-8-65536 ... 125000 -YugabyteDB-64-8-65536-7 YugabyteDB-64-8-65536 ... 125000 -YugabyteDB-64-8-65536-8 YugabyteDB-64-8-65536 ... 125000 - -[8 rows x 36 columns] Result workflow complete ## Show Summary @@ -115,9 +91,9 @@ Result workflow complete ### Workload YCSB SF=1 Type: ycsb - Duration: 773s - Code: 1730222076 - This includes no queries. YCSB runs the benchmark + Duration: 562s + Code: 1734625544 + YCSB tool runs the benchmark. This experiment compares run time and resource consumption of YCSB queries. Workload is 'A'. Number of rows to insert is 1000000. Number of operations is 10000000. Batch size is ''. YCSB is performed using several threads and processes. Target is based on multiples of '16384'. Factors for loading are [4]. Factors for benchmarking are [4]. @@ -134,23 +110,44 @@ YCSB SF=1 ### Connections YugabyteDB-64-8-65536-1 uses docker image postgres:15.0 - RAM:541008605184 + RAM:541008576512 CPU:AMD Opteron(tm) Processor 6378 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254319416 + disk:249253840 datadisk:39428 requests_cpu:4 requests_memory:16Gi + worker 0 + RAM:1081966526464 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker28 + disk:439206828 + worker 1 + RAM:540587499520 + CPU:AMD EPYC 7502 32-Core Processor + Cores:128 + host:5.15.0-126-generic + node:cl-worker22 + disk:122936080 + worker 2 + RAM:1081965555712 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:584264864 ### Loading experiment_run threads target pod_count [OVERALL].Throughput(ops/sec) [OVERALL].RunTime(ms) [INSERT].Return=OK [INSERT].99thPercentileLatency(us) -YugabyteDB-64-8-65536 1 64 65536 8 16556.163255 60725.0 1000000 61163.0 +YugabyteDB-64-8-65536 1 64 65536 8 28456.559524 35509.0 1000000 15762.0 ### Execution experiment_run threads target pod_count [OVERALL].Throughput(ops/sec) [OVERALL].RunTime(ms) [READ].Return=OK [READ].99thPercentileLatency(us) [UPDATE].Return=OK [UPDATE].99thPercentileLatency(us) -YugabyteDB-64-8-65536-1 1 64 65536 1 20041.33 498969.0 5001600 63903.0 4998400 65599.0 +YugabyteDB-64-8-65536-1 1 64 65536 1 31861.75 313856.0 4998066 37663.0 5001934 43039.0 ### Workflow @@ -162,19 +159,19 @@ DBMS YugabyteDB-64-8-65536 - Pods [[1]] ### Ingestion - SUT CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] -YugabyteDB-64-8-65536-1 2423.24 13.34 12.39 16.56 +YugabyteDB-64-8-65536-1 925.63 4.64 1.75 5.32 ### Ingestion - Loader CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] -YugabyteDB-64-8-65536-1 52.99 0 2.83 2.87 +YugabyteDB-64-8-65536-1 0.09 0 0.01 0.01 ### Execution - SUT CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] -YugabyteDB-64-8-65536-1 19524.15 26.08 14.07 
24.03 +YugabyteDB-64-8-65536-1 13499.7 15.99 4.92 16.26 ### Execution - Benchmarker CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] -YugabyteDB-64-8-65536-1 934.46 2.07 0.61 0.61 +YugabyteDB-64-8-65536-1 932.99 3.2 0.61 0.61 ### Tests TEST passed: [OVERALL].Throughput(ops/sec) contains no 0 or NaN diff --git a/logs_tests/doc_ycsb_yugabytedb_1_summary.txt b/logs_tests/doc_ycsb_yugabytedb_1_summary.txt index e46023f8f..d9b9a21f4 100644 --- a/logs_tests/doc_ycsb_yugabytedb_1_summary.txt +++ b/logs_tests/doc_ycsb_yugabytedb_1_summary.txt @@ -3,9 +3,9 @@ ### Workload YCSB SF=1 Type: ycsb - Duration: 773s - Code: 1730222076 - This includes no queries. YCSB runs the benchmark + Duration: 562s + Code: 1734625544 + YCSB tool runs the benchmark. This experiment compares run time and resource consumption of YCSB queries. Workload is 'A'. Number of rows to insert is 1000000. Number of operations is 10000000. Batch size is ''. YCSB is performed using several threads and processes. Target is based on multiples of '16384'. Factors for loading are [4]. Factors for benchmarking are [4]. @@ -22,23 +22,44 @@ YCSB SF=1 ### Connections YugabyteDB-64-8-65536-1 uses docker image postgres:15.0 - RAM:541008605184 + RAM:541008576512 CPU:AMD Opteron(tm) Processor 6378 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254319416 + disk:249253840 datadisk:39428 requests_cpu:4 requests_memory:16Gi + worker 0 + RAM:1081966526464 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker28 + disk:439206828 + worker 1 + RAM:540587499520 + CPU:AMD EPYC 7502 32-Core Processor + Cores:128 + host:5.15.0-126-generic + node:cl-worker22 + disk:122936080 + worker 2 + RAM:1081965555712 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:584264864 ### Loading experiment_run threads target pod_count [OVERALL].Throughput(ops/sec) [OVERALL].RunTime(ms) [INSERT].Return=OK [INSERT].99thPercentileLatency(us) -YugabyteDB-64-8-65536 1 64 65536 8 16556.163255 60725.0 1000000 61163.0 +YugabyteDB-64-8-65536 1 64 65536 8 28456.559524 35509.0 1000000 15762.0 ### Execution experiment_run threads target pod_count [OVERALL].Throughput(ops/sec) [OVERALL].RunTime(ms) [READ].Return=OK [READ].99thPercentileLatency(us) [UPDATE].Return=OK [UPDATE].99thPercentileLatency(us) -YugabyteDB-64-8-65536-1 1 64 65536 1 20041.33 498969.0 5001600 63903.0 4998400 65599.0 +YugabyteDB-64-8-65536-1 1 64 65536 1 31861.75 313856.0 4998066 37663.0 5001934 43039.0 ### Workflow @@ -50,19 +71,19 @@ DBMS YugabyteDB-64-8-65536 - Pods [[1]] ### Ingestion - SUT CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] -YugabyteDB-64-8-65536-1 2423.24 13.34 12.39 16.56 +YugabyteDB-64-8-65536-1 925.63 4.64 1.75 5.32 ### Ingestion - Loader CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] -YugabyteDB-64-8-65536-1 52.99 0 2.83 2.87 +YugabyteDB-64-8-65536-1 0.09 0 0.01 0.01 ### Execution - SUT CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] -YugabyteDB-64-8-65536-1 19524.15 26.08 14.07 24.03 +YugabyteDB-64-8-65536-1 13499.7 15.99 4.92 16.26 ### Execution - Benchmarker CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] -YugabyteDB-64-8-65536-1 934.46 2.07 0.61 0.61 +YugabyteDB-64-8-65536-1 932.99 3.2 0.61 0.61 ### Tests TEST passed: [OVERALL].Throughput(ops/sec) contains no 0 or NaN diff --git a/logs_tests/doc_ycsb_yugabytedb_2.log b/logs_tests/doc_ycsb_yugabytedb_2.log index 7c0626212..541adb0dc 100644 --- a/logs_tests/doc_ycsb_yugabytedb_2.log +++ 
b/logs_tests/doc_ycsb_yugabytedb_2.log @@ -2,8 +2,8 @@ Data Directory : is running Result Directory : is running Dashboard : is running Message Queue : is running -Experiment : has code 1730223222 -Experiment : starts at 2024-10-29 18:33:42.187131 (4959162.311106925) +Experiment : has code 1734626144 +Experiment : starts at 2024-12-19 17:35:44.158663 (4310945.565792602) Experiment : This experiment compares run time and resource consumption of YCSB queries. Workload is 'A'. Number of rows to insert is 1000000. Number of operations is 10000000. Batch size is ''. YCSB is performed using several threads and processes. Target is based on multiples of '16384'. Factors for loading are [4]. Factors for benchmarking are [4]. @@ -13,6 +13,7 @@ Import is handled by 8 processes (pods). Loading is fixed to cl-worker19. Benchmarking is fixed to cl-worker19. SUT is fixed to cl-worker11. +Loading is skipped. Loading is tested with [64] threads, split into [8] pods. Benchmarking is tested with [64] threads, split into [1] pods. Benchmarking is run as [1] times the number of benchmarking pods. @@ -21,28 +22,19 @@ Cluster monitoring : is running done YugabyteDB-64-8-65536 : will start now - waiting 30s - : done -YugabyteDB-64-8-65536 : will start benchmarking but not before 2024-10-29 17:35:42 (that is in 60 secs) +YugabyteDB-64-8-65536 : will start benchmarking but not before 2024-12-19 16:37:44 (that is in 60 secs) - waiting 30s - : done -YugabyteDB-64-8-65536 : will start benchmarking but not before 2024-10-29 17:35:42 +YugabyteDB-64-8-65536 : will start benchmarking but not before 2024-12-19 16:37:44 done YugabyteDB-64-8-65536 : benchmarks done 0 of 1. This will be client 1 YugabyteDB-64-8-65536 : we will change parameters of benchmark as {'PARALLEL': '1', 'SF': '1', 'BEXHOMA_SYNCH_LOAD': 1, 'YCSB_THREADCOUNT': 64, 'YCSB_TARGET': 65536, 'YCSB_STATUS': 1, 'YCSB_WORKLOAD': 'a', 'YCSB_ROWS': 1000000, 'YCSB_OPERATIONS': 10000000, 'YCSB_BATCHSIZE': ''} YugabyteDB-64-8-65536-1 : start benchmarking -YugabyteDB-64-8-65536 : benchmarking results in folder /home/perdelt/benchmarks/1730223222 +YugabyteDB-64-8-65536 : distributed system - get host info for worker yb-tserver-0 +YugabyteDB-64-8-65536 : distributed system - get host info for worker yb-tserver-1 +YugabyteDB-64-8-65536 : distributed system - get host info for worker yb-tserver-2 +YugabyteDB-64-8-65536 : benchmarking results in folder /home/perdelt/benchmarks/1734626144 - waiting 10s - : done -YugabyteDB-64-8-65536 : benchmarking is waiting for job bexhoma-benchmarker-yugabytedb-64-8-65536-1730223222-1-1-h9krw: found -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks +YugabyteDB-64-8-65536 : benchmarking is waiting for job bexhoma-benchmarker-yugabytedb-64-8-65536-1734626144-1-1-5r565: found - waiting 30s - : done YugabyteDB-64-8-65536 : has running benchmarks - waiting 30s - : done @@ -66,23 +58,19 @@ YugabyteDB-64-8-65536 : has running benchmarks done YugabyteDB-64-8-65536 : has running benchmarks YugabyteDB-64-8-65536-1 : showing benchmarker times -YugabyteDB-64-8-65536-1 : benchmarker timespan (start to end single container [s]) = 523 -YugabyteDB-64-8-65536-1 : benchmarker 
times (start/end per pod and container) = [(1730223347, 1730223859)] -YugabyteDB-64-8-65536-1 : found and updated times {'benchmarker': [(1730223347, 1730223859)]} +YugabyteDB-64-8-65536-1 : benchmarker timespan (start to end single container [s]) = 342 +YugabyteDB-64-8-65536-1 : benchmarker times (start/end per pod and container) = [(1734626273, 1734626593)] +YugabyteDB-64-8-65536-1 : found and updated times {'benchmarker': [(1734626273, 1734626593)]} YugabyteDB-64-8-65536-1 : collecting execution metrics of SUT +YugabyteDB-64-8-65536-1 : example metric {'query': '(sum(max(container_memory_working_set_bytes{pod=~"(.*)yb-tserver-(.*)"}) by (pod)))/1024/1024', 'title': 'CPU Memory [MiB]'} YugabyteDB-64-8-65536-1 : collecting metrics of benchmarker +YugabyteDB-64-8-65536-1 : example metric {'query': '(sum(max(container_memory_working_set_bytes{pod=~"(.*)yugabytedb-64-8-65536-1734626144(.*)", container="dbmsbenchmarker"}) by (pod)))/1024/1024', 'title': 'CPU Memory [MiB]'} done YugabyteDB-64-8-65536 : can be stopped -Experiment : ends at 2024-10-29 18:45:12.049630 (4959852.173609247) - 689.86s total +Experiment : ends at 2024-12-19 17:44:14.966144 (4311456.373277081) - 510.81s total Experiment : downloading partial results Experiment : uploading full results Experiment : uploading workload file -Benchmarking connection ... [UPDATE].Return=OK -connection_pod ... -YugabyteDB-64-8-65536-1-1 YugabyteDB-64-8-65536-1 ... 5001222 - -[1 rows x 43 columns] -Workflow {'YugabyteDB-64-8-65536': [[1]]} Result workflow complete ## Show Summary @@ -90,9 +78,9 @@ Result workflow complete ### Workload YCSB SF=1 Type: ycsb - Duration: 690s - Code: 1730223222 - This includes no queries. YCSB runs the benchmark + Duration: 511s + Code: 1734626144 + YCSB tool runs the benchmark. This experiment compares run time and resource consumption of YCSB queries. Workload is 'A'. Number of rows to insert is 1000000. Number of operations is 10000000. Batch size is ''. YCSB is performed using several threads and processes. Target is based on multiples of '16384'. Factors for loading are [4]. Factors for benchmarking are [4]. @@ -102,6 +90,7 @@ YCSB SF=1 Loading is fixed to cl-worker19. Benchmarking is fixed to cl-worker19. SUT is fixed to cl-worker11. + Loading is skipped. Loading is tested with [64] threads, split into [8] pods. Benchmarking is tested with [64] threads, split into [1] pods. Benchmarking is run as [1] times the number of benchmarking pods. 
@@ -109,19 +98,40 @@ YCSB SF=1 ### Connections YugabyteDB-64-8-65536-1 uses docker image postgres:15.0 - RAM:541008605184 + RAM:541008576512 CPU:AMD Opteron(tm) Processor 6378 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254319248 + disk:249253676 datadisk:39268 requests_cpu:4 requests_memory:16Gi + worker 0 + RAM:1081966526464 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker28 + disk:441407636 + worker 1 + RAM:540587499520 + CPU:AMD EPYC 7502 32-Core Processor + Cores:128 + host:5.15.0-126-generic + node:cl-worker22 + disk:125219072 + worker 2 + RAM:1081965555712 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:586475888 ### Execution experiment_run threads target pod_count [OVERALL].Throughput(ops/sec) [OVERALL].RunTime(ms) [READ].Return=OK [READ].99thPercentileLatency(us) [UPDATE].Return=OK [UPDATE].99thPercentileLatency(us) -YugabyteDB-64-8-65536-1 1 64 65536 1 19547.36 511578.0 4998778 64703.0 5001222 66239.0 +YugabyteDB-64-8-65536-1 1 64 65536 1 31287.55 319616.0 4999554 39551.0 5000446 43999.0 ### Workflow @@ -133,11 +143,11 @@ DBMS YugabyteDB-64-8-65536 - Pods [[1]] ### Execution - SUT CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] -YugabyteDB-64-8-65536-1 19802.0 26.15 14.21 24.03 +YugabyteDB-64-8-65536-1 13772.52 16.0 7.23 23.99 ### Execution - Benchmarker CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] -YugabyteDB-64-8-65536-1 1039.41 2.13 0.61 0.61 +YugabyteDB-64-8-65536-1 982.63 3.4 0.61 0.61 ### Tests TEST passed: [OVERALL].Throughput(ops/sec) contains no 0 or NaN diff --git a/logs_tests/doc_ycsb_yugabytedb_2_summary.txt b/logs_tests/doc_ycsb_yugabytedb_2_summary.txt index 3b21cc5d9..35bfad77a 100644 --- a/logs_tests/doc_ycsb_yugabytedb_2_summary.txt +++ b/logs_tests/doc_ycsb_yugabytedb_2_summary.txt @@ -3,9 +3,9 @@ ### Workload YCSB SF=1 Type: ycsb - Duration: 690s - Code: 1730223222 - This includes no queries. YCSB runs the benchmark + Duration: 511s + Code: 1734626144 + YCSB tool runs the benchmark. This experiment compares run time and resource consumption of YCSB queries. Workload is 'A'. Number of rows to insert is 1000000. Number of operations is 10000000. Batch size is ''. YCSB is performed using several threads and processes. Target is based on multiples of '16384'. Factors for loading are [4]. Factors for benchmarking are [4]. @@ -15,6 +15,7 @@ YCSB SF=1 Loading is fixed to cl-worker19. Benchmarking is fixed to cl-worker19. SUT is fixed to cl-worker11. + Loading is skipped. Loading is tested with [64] threads, split into [8] pods. Benchmarking is tested with [64] threads, split into [1] pods. Benchmarking is run as [1] times the number of benchmarking pods. 
@@ -22,19 +23,40 @@ YCSB SF=1 ### Connections YugabyteDB-64-8-65536-1 uses docker image postgres:15.0 - RAM:541008605184 + RAM:541008576512 CPU:AMD Opteron(tm) Processor 6378 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254319248 + disk:249253676 datadisk:39268 requests_cpu:4 requests_memory:16Gi + worker 0 + RAM:1081966526464 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker28 + disk:441407636 + worker 1 + RAM:540587499520 + CPU:AMD EPYC 7502 32-Core Processor + Cores:128 + host:5.15.0-126-generic + node:cl-worker22 + disk:125219072 + worker 2 + RAM:1081965555712 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:586475888 ### Execution experiment_run threads target pod_count [OVERALL].Throughput(ops/sec) [OVERALL].RunTime(ms) [READ].Return=OK [READ].99thPercentileLatency(us) [UPDATE].Return=OK [UPDATE].99thPercentileLatency(us) -YugabyteDB-64-8-65536-1 1 64 65536 1 19547.36 511578.0 4998778 64703.0 5001222 66239.0 +YugabyteDB-64-8-65536-1 1 64 65536 1 31287.55 319616.0 4999554 39551.0 5000446 43999.0 ### Workflow @@ -46,11 +68,11 @@ DBMS YugabyteDB-64-8-65536 - Pods [[1]] ### Execution - SUT CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] -YugabyteDB-64-8-65536-1 19802.0 26.15 14.21 24.03 +YugabyteDB-64-8-65536-1 13772.52 16.0 7.23 23.99 ### Execution - Benchmarker CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] -YugabyteDB-64-8-65536-1 1039.41 2.13 0.61 0.61 +YugabyteDB-64-8-65536-1 982.63 3.4 0.61 0.61 ### Tests TEST passed: [OVERALL].Throughput(ops/sec) contains no 0 or NaN diff --git a/logs_tests/doc_ycsb_yugabytedb_3.log b/logs_tests/doc_ycsb_yugabytedb_3.log index 4b2b1ffdc..2f56c59ee 100644 --- a/logs_tests/doc_ycsb_yugabytedb_3.log +++ b/logs_tests/doc_ycsb_yugabytedb_3.log @@ -2,8 +2,8 @@ Data Directory : is running Result Directory : is running Dashboard : is running Message Queue : is running -Experiment : has code 1730457505 -Experiment : starts at 2024-11-01 11:38:25.445463 (142306.852591312) +Experiment : has code 1734626805 +Experiment : starts at 2024-12-19 17:46:45.657557 (4311607.064689156) Experiment : This experiment compares run time and resource consumption of YCSB queries. Workload is 'A'. Number of rows to insert is 1000000. Number of operations is 10000000. Batch size is ''. YCSB is performed using several threads and processes. Target is based on multiples of '16384'. Factors for loading are [4]. Factors for benchmarking are [4]. 
@@ -23,47 +23,34 @@ done YugabyteDB-64-8-65536 : will start now - waiting 30s - : done YugabyteDB-64-8-65536 : is not loaded yet -YugabyteDB-64-8-65536 : will start loading but not before 2024-11-01 10:40:26 (that is in 60 secs) +YugabyteDB-64-8-65536 : will start loading but not before 2024-12-19 16:48:46 (that is in 60 secs) - waiting 30s - : done YugabyteDB-64-8-65536 : is not loaded yet -YugabyteDB-64-8-65536 : will start loading but not before 2024-11-01 10:40:26 +YugabyteDB-64-8-65536 : will start loading but not before 2024-12-19 16:48:46 done YugabyteDB-64-8-65536 : is not loaded yet YugabyteDB-64-8-65536 : start asynch loading scripts of type loaded YugabyteDB-64-8-65536 : is loading - waiting 30s - : done YugabyteDB-64-8-65536 : is loading -- waiting 30s - : done -YugabyteDB-64-8-65536 : is loading -- waiting 30s - : done -YugabyteDB-64-8-65536 : is loading -- waiting 30s - : done -YugabyteDB-64-8-65536 : is loading -- waiting 30s - : done -YugabyteDB-64-8-65536 : is loading -- waiting 30s - : done -YugabyteDB-64-8-65536 : is loading -- waiting 30s - : done -YugabyteDB-64-8-65536 : is loading -- waiting 30s - : done -YugabyteDB-64-8-65536 : is loading -- waiting 30s - : done -YugabyteDB-64-8-65536 : is loading -- waiting 30s - : done -YugabyteDB-64-8-65536 : is loading -Error from server (NotFound): persistentvolumeclaims "bexhoma-workers-bexhoma-sut-yugabytedb-64-8-65536-1730457505-85689b55d9-fvc92" not found done YugabyteDB-64-8-65536 : showing loader times -YugabyteDB-64-8-65536 : loader timespan (first to last [s]) = 296 +YugabyteDB-64-8-65536 : loader timespan (first to last [s]) = 37 YugabyteDB-64-8-65536 : benchmarks done 0 of 1. This will be client 1 YugabyteDB-64-8-65536 : we will change parameters of benchmark as {'PARALLEL': '1', 'SF': '1', 'BEXHOMA_SYNCH_LOAD': 1, 'YCSB_THREADCOUNT': 64, 'YCSB_TARGET': 65536, 'YCSB_STATUS': 1, 'YCSB_WORKLOAD': 'a', 'YCSB_ROWS': 1000000, 'YCSB_OPERATIONS': 10000000, 'YCSB_BATCHSIZE': ''} YugabyteDB-64-8-65536-1 : start benchmarking -YugabyteDB-64-8-65536 : benchmarking results in folder /home/perdelt/benchmarks/1730457505 +YugabyteDB-64-8-65536 : distributed system - get host info for worker yb-tserver-0 +YugabyteDB-64-8-65536 : distributed system - get host info for worker yb-tserver-1 +YugabyteDB-64-8-65536 : distributed system - get host info for worker yb-tserver-2 +YugabyteDB-64-8-65536 : benchmarking results in folder /home/perdelt/benchmarks/1734626805 - waiting 10s - : done found YugabyteDB-64-8-65536-1 : collecting loading metrics of SUT +YugabyteDB-64-8-65536-1 : example metric {'query': '(sum(max(container_memory_working_set_bytes{pod=~"(.*)yb-tserver-(.*)"}) by (pod)))/1024/1024', 'title': 'CPU Memory [MiB]'} YugabyteDB-64-8-65536-1 : collecting metrics of data generator +YugabyteDB-64-8-65536-1 : example metric {'query': '(sum(max(container_memory_working_set_bytes{pod=~"(.*)yugabytedb-64-8-65536-1734626805(.*)", container="datagenerator"}) by (pod)))/1024/1024', 'title': 'CPU Memory [MiB]'} YugabyteDB-64-8-65536-1 : collecting metrics of data injector +YugabyteDB-64-8-65536-1 : example metric {'query': '(sum(max(container_memory_working_set_bytes{pod=~"(.*)yugabytedb-64-8-65536-1734626805(.*)", container="sensor"}) by (pod)))/1024/1024', 'title': 'CPU Memory [MiB]'} - waiting 30s - : done YugabyteDB-64-8-65536 : has running benchmarks - waiting 30s - : done @@ -84,185 +71,22 @@ YugabyteDB-64-8-65536 : has running benchmarks YugabyteDB-64-8-65536 : has running benchmarks - waiting 30s - : done YugabyteDB-64-8-65536 : has 
running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- 
waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -- waiting 30s - : done -YugabyteDB-64-8-65536 : has running benchmarks -Error from server (NotFound): persistentvolumeclaims "bexhoma-workers-bexhoma-sut-yugabytedb-64-8-65536-1730457505-85689b55d9-fvc92" not found done YugabyteDB-64-8-65536 : has running benchmarks YugabyteDB-64-8-65536-1 : showing benchmarker times -YugabyteDB-64-8-65536-1 : benchmarker timespan (start to end single container [s]) = 2561 -YugabyteDB-64-8-65536-1 : benchmarker times (start/end per pod and container) = [(1730457971, 1730460510)] -YugabyteDB-64-8-65536-1 : found and updated times {'benchmarker': [(1730457971, 1730460510)]} +YugabyteDB-64-8-65536-1 : benchmarker timespan (start to end single container [s]) = 355 +YugabyteDB-64-8-65536-1 : benchmarker times (start/end per pod and container) = [(1734627001, 1734627326)] +YugabyteDB-64-8-65536-1 : found and updated times {'benchmarker': [(1734627001, 1734627326)]} YugabyteDB-64-8-65536-1 : collecting execution metrics of SUT +YugabyteDB-64-8-65536-1 : example metric {'query': '(sum(max(container_memory_working_set_bytes{pod=~"(.*)yb-tserver-(.*)"}) by (pod)))/1024/1024', 'title': 'CPU Memory [MiB]'} YugabyteDB-64-8-65536-1 : collecting metrics of benchmarker +YugabyteDB-64-8-65536-1 : example metric {'query': '(sum(max(container_memory_working_set_bytes{pod=~"(.*)yugabytedb-64-8-65536-1734626805(.*)", container="dbmsbenchmarker"}) by (pod)))/1024/1024', 'title': 'CPU Memory [MiB]'} done YugabyteDB-64-8-65536 : can be stopped -Experiment : ends at 2024-11-01 12:29:36.469991 (145377.877123149) - 3071.02s total +Experiment : ends at 2024-12-19 17:56:35.924937 (4312197.33207037) - 590.27s total Experiment : downloading partial results Experiment : uploading full results Experiment : uploading workload file -Benchmarking connection ... [UPDATE].Return=OK -connection_pod ... -YugabyteDB-64-8-65536-1-1 YugabyteDB-64-8-65536-1 ... 
5000381 - -[1 rows x 43 columns] -Workflow {'YugabyteDB-64-8-65536': [[1]]} -Loading connection ... [INSERT].Return=OK -connection_pod ... -YugabyteDB-64-8-65536-1 YugabyteDB-64-8-65536 ... 125000 -YugabyteDB-64-8-65536-2 YugabyteDB-64-8-65536 ... 125000 -YugabyteDB-64-8-65536-3 YugabyteDB-64-8-65536 ... 125000 -YugabyteDB-64-8-65536-4 YugabyteDB-64-8-65536 ... 125000 -YugabyteDB-64-8-65536-5 YugabyteDB-64-8-65536 ... 125000 -YugabyteDB-64-8-65536-6 YugabyteDB-64-8-65536 ... 125000 -YugabyteDB-64-8-65536-7 YugabyteDB-64-8-65536 ... 125000 -YugabyteDB-64-8-65536-8 YugabyteDB-64-8-65536 ... 125000 - -[8 rows x 36 columns] Result workflow complete ## Show Summary @@ -270,9 +94,9 @@ Result workflow complete ### Workload YCSB SF=1 Type: ycsb - Duration: 3072s - Code: 1730457505 - This includes no queries. YCSB runs the benchmark + Duration: 591s + Code: 1734626805 + YCSB tool runs the benchmark. This experiment compares run time and resource consumption of YCSB queries. Workload is 'A'. Number of rows to insert is 1000000. Number of operations is 10000000. Batch size is ''. YCSB is performed using several threads and processes. Target is based on multiples of '16384'. Factors for loading are [4]. Factors for benchmarking are [4]. @@ -290,25 +114,46 @@ YCSB SF=1 ### Connections YugabyteDB-64-8-65536-1 uses docker image postgres:15.0 - RAM:541008605184 + RAM:541008576512 CPU:AMD Opteron(tm) Processor 6378 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254915992 - datadisk:39222 + disk:249214420 + datadisk:39106 volume_size:1.0G volume_used:36M requests_cpu:4 requests_memory:16Gi + worker 0 + RAM:1081966526464 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker28 + disk:439198928 + worker 1 + RAM:540587499520 + CPU:AMD EPYC 7502 32-Core Processor + Cores:128 + host:5.15.0-126-generic + node:cl-worker22 + disk:122937852 + worker 2 + RAM:1081965555712 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:584265648 ### Loading experiment_run threads target pod_count [OVERALL].Throughput(ops/sec) [OVERALL].RunTime(ms) [INSERT].Return=OK [INSERT].99thPercentileLatency(us) -YugabyteDB-64-8-65536 1 64 65536 8 3390.978244 295234.0 1000000 77023.0 +YugabyteDB-64-8-65536 1 64 65536 8 27920.198198 36082.0 1000000 15099.0 ### Execution experiment_run threads target pod_count [OVERALL].Throughput(ops/sec) [OVERALL].RunTime(ms) [READ].Return=OK [READ].99thPercentileLatency(us) [UPDATE].Return=OK [UPDATE].99thPercentileLatency(us) -YugabyteDB-64-8-65536-1 1 64 65536 1 3939.8 2538203.0 4999619 73599.0 5000381 76287.0 +YugabyteDB-64-8-65536-1 1 64 65536 1 30852.87 324119.0 4999076 26703.0 5000924 44287.0 ### Workflow @@ -320,19 +165,19 @@ DBMS YugabyteDB-64-8-65536 - Pods [[1]] ### Ingestion - SUT CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] -YugabyteDB-64-8-65536-1 10050.64 11.96 3.23 13.17 +YugabyteDB-64-8-65536-1 14677.38 4.86 4.23 13.99 ### Ingestion - Loader CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] -YugabyteDB-64-8-65536-1 202.36 0.38 4.55 4.58 +YugabyteDB-64-8-65536-1 0.08 0 0.01 0.01 ### Execution - SUT CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] -YugabyteDB-64-8-65536-1 106669.7 16.01 7.99 24.02 +YugabyteDB-64-8-65536-1 14586.01 17.96 4.88 16.69 ### Execution - Benchmarker CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] -YugabyteDB-64-8-65536-1 1290.17 0.62 0.61 0.61 +YugabyteDB-64-8-65536-1 771.47 2.69 0.61 0.61 ### Tests TEST passed: 
[OVERALL].Throughput(ops/sec) contains no 0 or NaN diff --git a/logs_tests/doc_ycsb_yugabytedb_3_summary.txt b/logs_tests/doc_ycsb_yugabytedb_3_summary.txt index ddb2d6402..aeb7f953b 100644 --- a/logs_tests/doc_ycsb_yugabytedb_3_summary.txt +++ b/logs_tests/doc_ycsb_yugabytedb_3_summary.txt @@ -3,9 +3,9 @@ ### Workload YCSB SF=1 Type: ycsb - Duration: 3072s - Code: 1730457505 - This includes no queries. YCSB runs the benchmark + Duration: 591s + Code: 1734626805 + YCSB tool runs the benchmark. This experiment compares run time and resource consumption of YCSB queries. Workload is 'A'. Number of rows to insert is 1000000. Number of operations is 10000000. Batch size is ''. YCSB is performed using several threads and processes. Target is based on multiples of '16384'. Factors for loading are [4]. Factors for benchmarking are [4]. @@ -23,25 +23,46 @@ YCSB SF=1 ### Connections YugabyteDB-64-8-65536-1 uses docker image postgres:15.0 - RAM:541008605184 + RAM:541008576512 CPU:AMD Opteron(tm) Processor 6378 Cores:64 - host:5.15.0-116-generic + host:5.15.0-126-generic node:cl-worker11 - disk:254915992 - datadisk:39222 + disk:249214420 + datadisk:39106 volume_size:1.0G volume_used:36M requests_cpu:4 requests_memory:16Gi + worker 0 + RAM:1081966526464 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker28 + disk:439198928 + worker 1 + RAM:540587499520 + CPU:AMD EPYC 7502 32-Core Processor + Cores:128 + host:5.15.0-126-generic + node:cl-worker22 + disk:122937852 + worker 2 + RAM:1081965555712 + CPU:AMD EPYC 7742 64-Core Processor + Cores:256 + host:5.15.0-1067-nvidia + node:cl-worker27 + disk:584265648 ### Loading experiment_run threads target pod_count [OVERALL].Throughput(ops/sec) [OVERALL].RunTime(ms) [INSERT].Return=OK [INSERT].99thPercentileLatency(us) -YugabyteDB-64-8-65536 1 64 65536 8 3390.978244 295234.0 1000000 77023.0 +YugabyteDB-64-8-65536 1 64 65536 8 27920.198198 36082.0 1000000 15099.0 ### Execution experiment_run threads target pod_count [OVERALL].Throughput(ops/sec) [OVERALL].RunTime(ms) [READ].Return=OK [READ].99thPercentileLatency(us) [UPDATE].Return=OK [UPDATE].99thPercentileLatency(us) -YugabyteDB-64-8-65536-1 1 64 65536 1 3939.8 2538203.0 4999619 73599.0 5000381 76287.0 +YugabyteDB-64-8-65536-1 1 64 65536 1 30852.87 324119.0 4999076 26703.0 5000924 44287.0 ### Workflow @@ -53,19 +74,19 @@ DBMS YugabyteDB-64-8-65536 - Pods [[1]] ### Ingestion - SUT CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] -YugabyteDB-64-8-65536-1 10050.64 11.96 3.23 13.17 +YugabyteDB-64-8-65536-1 14677.38 4.86 4.23 13.99 ### Ingestion - Loader CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] -YugabyteDB-64-8-65536-1 202.36 0.38 4.55 4.58 +YugabyteDB-64-8-65536-1 0.08 0 0.01 0.01 ### Execution - SUT CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] -YugabyteDB-64-8-65536-1 106669.7 16.01 7.99 24.02 +YugabyteDB-64-8-65536-1 14586.01 17.96 4.88 16.69 ### Execution - Benchmarker CPU [CPUs] Max CPU Max RAM [Gb] Max RAM Cached [Gb] -YugabyteDB-64-8-65536-1 1290.17 0.62 0.61 0.61 +YugabyteDB-64-8-65536-1 771.47 2.69 0.61 0.61 ### Tests TEST passed: [OVERALL].Throughput(ops/sec) contains no 0 or NaN diff --git a/test-cloud-notes.sh b/test-cloud-notes.sh new file mode 100644 index 000000000..9810abf71 --- /dev/null +++ b/test-cloud-notes.sh @@ -0,0 +1,276 @@ +#!/bin/bash +###################################################################################### +# Bash Script for Bexhoma Test Runs - Test scripts for database services 
+######################################################################################
+#
+# This script starts a sequence of experiments with varying parameters.
+# Each experiment waits until previous tests have been completed.
+# Logs are written to a log folder.
+# At the end, logs are cleaned and the summaries are extracted and stored in separate files.
+#
+# Author: Patrick K. Erdelt
+# Email: patrick.erdelt@bht-berlin.de
+# Date: 2024-10-01
+# Version: 1.0
+######################################################################################
+
+
+BEXHOMA_NODE_SUT="cl-worker11"
+BEXHOMA_NODE_LOAD="cl-worker19"
+BEXHOMA_NODE_BENCHMARK="cl-worker19"
+LOG_DIR="./logs_tests"
+
+mkdir -p $LOG_DIR
+
+# Define the wait_process function
+wait_process() {
+    local process_name=$1
+
+    # Wait until the process with the name passed as an argument has terminated
+    while ps aux | grep "[p]ython $process_name.py" > /dev/null; do
+        # Process is still running, wait for 60 seconds
+        echo "$(date +"%Y-%m-%d %H:%M:%S"): Waiting for process python $process_name.py to terminate..."
+        sleep 60
+    done
+
+    echo "$(date +"%Y-%m-%d %H:%M:%S"): Process python $process_name.py has terminated."
+}
+
+# Example usage
+#wait_process "tpch"
+
+
+# Wait for all previous jobs to complete
+wait_process "tpch"
+wait_process "tpcds"
+wait_process "hammerdb"
+wait_process "benchbase"
+wait_process "ycsb"
+
+
+###############################################################
+################## TPC-DS Persistent Storage ##################
+###############################################################
+
+
+#### TPC-DS Persistent Storage (Example-TPC-DS.md)
+nohup python tpcds.py -ms 4 -dt -tr \
+    -dbms MySQL \
+    -nlp 8 \
+    -nlt 8 \
+    -sf 1 \
+    -t 1200 \
+    -ii -ic -is \
+    -nc 2 \
+    -rst shared -rss 30Gi \
+    -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK \
+    run $LOG_DIR/doc_tpcds_testcase_mysql_storage.log &
+
+#### Wait so that next experiment receives a different code
+#sleep 600
+wait_process "tpcds"
+
+
+#### TPC-DS Persistent Storage (Example-TPC-DS.md)
+nohup python tpcds.py -ms 4 -dt -tr \
+    -dbms PostgreSQL \
+    -nlp 8 \
+    -nlt 8 \
+    -sf 1 \
+    -t 1200 \
+    -ii -ic -is \
+    -nc 2 \
+    -rst shared -rss 30Gi \
+    -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK \
+    run $LOG_DIR/doc_tpcds_testcase_postgresql_storage.log &
+
+#### Wait so that next experiment receives a different code
+#sleep 600
+wait_process "tpcds"
+
+
+#### TPC-DS Persistent Storage (Example-TPC-DS.md)
+nohup python tpcds.py -ms 4 -dt -tr \
+    -dbms MariaDB \
+    -nlp 8 \
+    -nlt 8 \
+    -sf 1 \
+    -t 1200 \
+    -ii -ic -is \
+    -nc 2 \
+    -rst shared -rss 30Gi \
+    -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK \
+    run $LOG_DIR/doc_tpcds_testcase_mariadb_storage.log &
+
+#### Wait so that next experiment receives a different code
+#sleep 600
+wait_process "tpcds"
+
+
+#### TPC-DS Persistent Storage (Example-TPC-DS.md)
+nohup python tpcds.py -ms 4 -dt -tr \
+    -dbms MonetDB \
+    -nlp 8 \
+    -nlt 8 \
+    -sf 1 \
+    -t 1200 \
+    -ii -ic -is \
+    -nc 2 \
+    -rst shared -rss 30Gi \
+    -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK \
+    run $LOG_DIR/doc_tpcds_testcase_monetdb_storage.log &
+
+#### Wait so that next experiment receives a different code
+#sleep 600
+wait_process "tpcds"
+
+
+#######################################################
+############# YCSB Monitoring PostgreSQL ##############
+#######################################################
+
+
+#PostgreSQL
+##################################### sidecar
+#nohup python ycsb.py -ms 1 -tr -sf 1 -sfo 1 --workload a -dbms PostgreSQL -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK -tb 16384 -nlp 8 -nlt 64 -nlf 4 -nbp 1 -nbt 64 -nbf 4 -ne 1 -nc 1 -m run $LOG_DIR/test_ycsb_postgresql_sidecar.log &
+#-) ok
+
+##################################### daemonset
+#nohup python ycsb.py -ms 1 -tr -sf 1 -sfo 1 --workload a -dbms PostgreSQL -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK -tb 16384 -nlp 8 -nlt 64 -nlf 4 -nbp 1 -nbt 64 -nbf 4 -ne 1 -nc 1 -m -mc -db run $LOG_DIR/test_ycsb_postgresql_daemonset.log &
+#-) ok
+
+##################################### cluster preinstalled
+#nohup python ycsb.py -ms 1 -tr -sf 1 -sfo 1 --workload a -dbms PostgreSQL -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK -tb 16384 -nlp 8 -nlt 64 -nlf 4 -nbp 1 -nbt 64 -nbf 4 -ne 1 -nc 1 -m -mc -db run $LOG_DIR/test_ycsb_postgresql_cluster.log &
+#-) ok
+
+#DatabaseService
+##################################### cluster preinstalled
+
+######################### With loading
+#nohup python ycsb.py -ms 5 -tr -sf 1 -sfo 1 --workload a -dbms DatabaseService -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK -tb 16384 -nlp 8 -nlt 64 -nlf 4 -nbp 1 -nbt 64 -nbf 4 -ne 1 -nc 1 -m -mc run $LOG_DIR/test_ycsb_databaseservice_tmp1.log &
+#-) k8s: Dummy deployment - for loading e.g. PostgreSQL-compatible: a PostgreSQL container, because of psql
+#-) Metrics
+#--) Execution nonsense (?)
+
+######################### No loading
+#nohup python ycsb.py -ms 5 -tr -sf 1 -sfo 1 --workload a -dbms DatabaseService -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK -tb 16384 -nlp 8 -nlt 64 -nlf 4 -nbp 1 -nbt 64 -nbf 4 -ne 1 -nc 1 -m -mc -sl run $LOG_DIR/test_ycsb_databaseservice_tmp2.log &
+#-) cluster.config: info and JDBC connection data
+#-) OK
+
+
+#YugabyteDB
+##################################### daemonset
+######################### With loading
+#nohup python ycsb.py -ms 1 -tr -sf 1 -sfo 1 --workload a -dbms YugabyteDB -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK -tb 16384 -nlp 8 -nlt 64 -nlf 4 -nbp 1 -nbt 64 -nbf 4 -ne 1 -nc 1 -m -mc run $LOG_DIR/test_ycsb_yugabytedb_tmp1.log &
+#-) OK, after retries (usertable, host name)
+
+######################### No loading
+#nohup python ycsb.py -ms 1 -tr -sf 1 -sfo 1 --workload a -dbms YugabyteDB -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK -tb 16384 -nlp 8 -nlt 64 -nlf 4 -nbp 1 -nbt 64 -nbf 4 -ne 1 -nc 1 -m -mc -sl run $LOG_DIR/test_ycsb_yugabytedb_tmp2.log &
+#-) OK
+
+######################### No loading, with debug
+#nohup python ycsb.py -ms 1 -tr -sf 1 -sfo 1 --workload a -dbms YugabyteDB -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK -tb 16384 -nlp 8 -nlt 64 -nlf 4 -nbp 1 -nbt 64 -nbf 4 -ne 1 -nc 1 -m -mc -sl -db run $LOG_DIR/test_ycsb_yugabytedb_tmp2db.log &
+#-) without loading: ok
+
+
+##################################### cluster preinstalled
+######################### With loading
+#nohup python ycsb.py -ms 1 -tr -sf 1 -sfo 1 --workload a -dbms YugabyteDB -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK -tb 16384 -nlp 8 -nlt 64 -nlf 4 -nbp 1 -nbt 64 -nbf 4 -ne 1 -nc 1 -m -mc run $LOG_DIR/test_ycsb_yugabytedb_tmp3.log &
+
+######################### No loading
+#nohup python ycsb.py -ms 1 -tr -sf 1 -sfo 1 --workload a -dbms YugabyteDB -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK -tb 16384 -nlp 8 -nlt 64 -nlf 4 -nbp 1 -nbt 64 -nbf 4 -ne 1 -nc 1 -m -mc -sl run $LOG_DIR/test_ycsb_yugabytedb_tmp4.log &
+#-) OK
+
+######################### No loading, with debug
+#nohup python ycsb.py -ms 1 -tr -sf 1 -sfo 1 --workload a -dbms YugabyteDB -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK -tb 16384 -nlp 8 -nlt 64 -nlf 4 -nbp 1 -nbt 64 -nbf 4 -ne 1 -nc 1 -m -mc -sl -db run $LOG_DIR/test_ycsb_yugabytedb_tmp4db.log &
+#-) OK
+
+
+##################################### sidecar
+
+######################### No loading
+#nohup python ycsb.py -ms 1 -tr -sf 1 -sfo 1 --workload a -dbms YugabyteDB -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK -tb 16384 -nlp 8 -nlt 64 -nlf 4 -nbp 1 -nbt 64 -nbf 4 -ne 1 -nc 1 -m -sl run $LOG_DIR/test_ycsb_yugabytedb_tmp6.log &
+
+######################### No loading, with debug
+#nohup python ycsb.py -ms 1 -tr -sf 1 -sfo 1 --workload a -dbms YugabyteDB -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK -tb 16384 -nlp 8 -nlt 64 -nlf 4 -nbp 1 -nbt 64 -nbf 4 -ne 1 -nc 1 -m -sl -db run $LOG_DIR/test_ycsb_yugabytedb_tmp6db.log &
+
+
+###########################################
+############## Clean Folder ###############
+###########################################
+
+
+export MYDIR=$(pwd)
+cd $LOG_DIR
+# remove connection errors from logs
+grep -rl "Warning: Use tokens from the TokenRequest API or manually created secret-based tokens instead of auto-generated secret-based tokens." . | xargs sed -i '/Warning: Use tokens from the TokenRequest API or manually created secret-based tokens instead of auto-generated secret-based tokens./d'
+cd $MYDIR
+
+# Loop over each text file in the source directory
+for file in "$LOG_DIR"/*.log; do
+    # Get the filename without the path and extension
+    echo "Cleaning $file"
+    filename=$(basename "$file" .log)
+    # Extract lines starting from "## Show Summary" and save as _summary.txt in the destination directory
+    awk '/## Show Summary/ {show=1} show {print}' "$file" > "$LOG_DIR/${filename}_summary.txt"
+done
+
+echo "Extraction complete! Files are saved in $LOG_DIR."
+
diff --git a/test-cloud.sh b/test-cloud.sh
new file mode 100755
index 000000000..e81f03c4a
--- /dev/null
+++ b/test-cloud.sh
@@ -0,0 +1,634 @@
+#!/bin/bash
+######################################################################################
+# Bash Script for Bexhoma Test Runs - Test scripts for database services
+######################################################################################
+#
+# This script starts a sequence of experiments with varying parameters.
+# Each experiment waits until previous tests have been completed.
+# Logs are written to a log folder.
+# At the end, logs are cleaned and the summaries are extracted and stored in separate files.
+#
+# Author: Patrick K. Erdelt
+# Email: patrick.erdelt@bht-berlin.de
+# Date: 2024-10-01
+# Version: 1.0
+######################################################################################
+
+
+BEXHOMA_NODE_SUT="cl-worker11"
+BEXHOMA_NODE_LOAD="cl-worker19"
+BEXHOMA_NODE_BENCHMARK="cl-worker19"
+LOG_DIR="./logs_tests"
+
+mkdir -p $LOG_DIR
+
+# Define the wait_process function
+wait_process() {
+    local process_name=$1
+
+    # Wait until the process with the name passed as an argument has terminated
+    while ps aux | grep "[p]ython $process_name.py" > /dev/null; do
+        # Process is still running, wait for 60 seconds
+        echo "$(date +"%Y-%m-%d %H:%M:%S"): Waiting for process python $process_name.py to terminate..."
+        sleep 60
+    done
+
+    echo "$(date +"%Y-%m-%d %H:%M:%S"): Process python $process_name.py has terminated."
+}
+
+# Example usage
+#wait_process "tpch"
+
+
+# Wait for all previous jobs to complete
+wait_process "tpch"
+wait_process "tpcds"
+wait_process "hammerdb"
+wait_process "benchbase"
+wait_process "ycsb"
+
+
+################################################
+################## YugaByteDB ##################
+################################################
+
+
+install_yugabytedb() {
+    helm install bexhoma yugabytedb/yugabyte \
+    --version 2.23.0 \
+    --set \
+gflags.tserver.ysql_enable_packed_row=true,\
+resource.master.limits.cpu=2,\
+resource.master.limits.memory=8Gi,\
+resource.master.requests.cpu=2,\
+resource.master.requests.memory=8Gi,\
+resource.tserver.limits.cpu=8,\
+resource.tserver.limits.memory=8Gi,\
+resource.tserver.requests.cpu=8,\
+resource.tserver.requests.memory=8Gi,\
+storage.master.size=100Gi,\
+storage.tserver.size=100Gi,\
+storage.ephemeral=true,\
+tserver.tolerations[0].effect=NoSchedule,\
+tserver.tolerations[0].key=nvidia.com/gpu,\
+enableLoadBalancer=True
+    sleep 60
+}
+
+remove_yugabytedb() {
+    helm delete bexhoma
+    kubectl delete pvc -l app=yb-tserver
+    kubectl delete pvc -l app=yb-master
+    sleep 60
+}
+
+# install YugabyteDB
+install_yugabytedb
+
+#### YCSB Ingestion (Example-YugaByteDB.md)
+nohup python ycsb.py -ms 1 -tr \
+    -sf 1 \
+    -sfo 10 \
+    --workload a \
+    -dbms YugabyteDB \
+    -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK \
+    -tb 16384 \
+    -nlp 8 \
+    -nlt 64 \
+    -nlf 4 \
+    -nbp 1 \
+    -nbt 64 \
+    -nbf 4 \
+    -ne 1 \
+    -nc 1 \
+    -m -mc \
+    run $LOG_DIR/doc_ycsb_yugabytedb_1.log &
+
+
+wait_process "ycsb"
+
+
+#### YCSB Execution (Example-YugaByteDB.md)
+nohup python ycsb.py -ms 1 -tr \
+    -sf 1 \
+    -sfo 10 \
+    --workload a \
+    -dbms YugabyteDB \
+    -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK \
+    -tb 16384 \
+    -nlp 8 \
+    -nlt 64 \
+    -nlf 4 \
+    -nbp 1 \
+    -nbt 64 \
+    -nbf 4 \
+    -ne 1 \
+    -nc 1 \
+    -m -mc \
+    -sl \
+    run $LOG_DIR/doc_ycsb_yugabytedb_2.log &
+
+
+wait_process "ycsb"
+
+
+# remove YugabyteDB installation
+remove_yugabytedb
+
+# install YugabyteDB
+install_yugabytedb
+
+kubectl delete pvc bexhoma-storage-yugabytedb-ycsb-1
+
+
+#### YCSB Dummy Persistent Storage (Example-YugaByteDB.md)
+nohup python ycsb.py -ms 1 -tr \
+    -sf 1 \
+    -sfo 10 \
+    --workload a \
+    -dbms YugabyteDB \
+    -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK \
+    -tb 16384 \
+    -nlp 8 \
+    -nlt 64 \
+    -nlf 4 \
+    -nbp 1 \
+    -nbt 64 \
+    -nbf 4 \
+    -ne 1 \
+    -nc 1 \
+    -m -mc \
+    -rst shared -rss 1Gi \
+    run $LOG_DIR/doc_ycsb_yugabytedb_3.log &
+
+
+wait_process "ycsb"
+
+
+# remove YugabyteDB installation
+remove_yugabytedb
+
+# install YugabyteDB
+install_yugabytedb
+
+
+#### Benchbase
Simple (Example-YugaByteDB.md) +nohup python benchbase.py -ms 1 -tr \ + -sf 16 \ + -sd 5 \ + -dbms YugabyteDB \ + -nbp 1,2 \ + -nbt 16 \ + -nbf 16 \ + -tb 1024 \ + -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK \ + run $LOG_DIR/doc_benchbase_yugabytedb_1.log & + + +wait_process "benchbase" + + +# remove YugabyteDB installation +remove_yugabytedb + +# install YugabyteDB +install_yugabytedb + + +#### Benchbase More Complex (Example-YugaByteDB.md) +nohup python benchbase.py -ms 1 -tr \ + -sf 128 \ + -sd 60 \ + -dbms YugabyteDB \ + -nbp 1,2,4,8 \ + -nbt 64 \ + -nbf 16 \ + -tb 1024 \ + -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK \ + run $LOG_DIR/doc_benchbase_yugabytedb_2.log & + + +wait_process "benchbase" + + +# remove YugabyteDB installation +remove_yugabytedb + + + + + + + +################################################# +################## CockroachDB ################## +################################################# + + +#### YCSB Ingestion (Example-CockroachDB.md) +nohup python ycsb.py -ms 1 -tr \ + -sf 1 \ + -sfo 10 \ + -nw 3 \ + --workload a \ + -dbms CockroachDB \ + -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK \ + -tb 16384 \ + -nlp 8 \ + -nlt 64 \ + -nlf 4 \ + -nbp 1 \ + -nbt 64 \ + -nbf 4 \ + -ne 1 \ + -nc 1 \ + -m -mc \ + run $LOG_DIR/doc_ycsb_cockroachdb_1.log & + + +wait_process "ycsb" + + +#### Benchbase Simple (Example-CockroachDB.md) +nohup python benchbase.py -ms 1 -tr \ + -sf 16 \ + -sd 5 \ + -nw 3 \ + -dbms CockroachDB \ + -nbp 1,2 \ + -nbt 16 \ + -nbf 16 \ + -tb 1024 \ + -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK \ + run $LOG_DIR/doc_benchbase_cockroachdb_1.log & + + +wait_process "benchbase" + + +#### Benchbase Complex (Example-CockroachDB.md) +nohup python benchbase.py -ms 1 -tr \ + -sf 128 \ + -sd 60 \ + -nw 3 \ + -dbms CockroachDB \ + -nbp 1,2,4,8 \ + -nbt 64 \ + -nbf 16 \ + -tb 1024 \ + -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK \ + run $LOG_DIR/doc_benchbase_cockroachdb_2.log & + + +wait_process "benchbase" + + + + + + + + + + + + + + + + +############################################################### +################### YCSB Database Service ##################### +############################################################### + + +# delete database service placeholder +kubectl delete deployment bexhoma-deployment-postgres +kubectl delete svc bexhoma-service + +sleep 30 + +# start database service placeholder +kubectl create -f k8s/deploymenttemplate-PostgreSQLService.yml + +sleep 10 + + +#### YCSB Ingestion (Example-CloudDatabase.md) +nohup python ycsb.py -ms 2 -tr \ + -sf 1 \ + -sfo 1 \ + --workload a \ + -dbms DatabaseService \ + -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK \ + -tb 16384 \ + -nlp 8 \ + -nlt 64 \ + -nlf 4 \ + -nbp 1 \ + -nbt 64 \ + -nbf 4 \ + -ne 1 \ + -nc 1 \ + run $LOG_DIR/doc_ycsb_databaseservice_1.log & + + +#### Wait so that next experiment receives a different code +#sleep 600 +wait_process "ycsb" + + +#### YCSB Execution (Example-CloudDatabase.md) +nohup python ycsb.py -ms 2 -tr \ + -sf 1 \ + -sfo 10 \ + --workload a \ + -dbms DatabaseService \ + -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK \ + -tb 16384 \ + -nlp 8 \ + -nlt 64 \ + -nlf 4 \ + -nbp 1 \ + -nbt 64 \ + -nbf 4 \ + -ne 1 \ + -nc 1 \ + -m -mc \ + -sl \ + run $LOG_DIR/doc_ycsb_databaseservice_2.log & + + +#### Wait so that next experiment receives a different 
code
+#sleep 600
+wait_process "ycsb"
+
+# delete database service placeholder
+kubectl delete deployment bexhoma-deployment-postgres
+kubectl delete svc bexhoma-service
+
+sleep 30
+
+# start database service placeholder
+kubectl create -f k8s/deploymenttemplate-PostgreSQLService.yml
+
+# we have to be sure the "cloud service" is ready - bexhoma does not check this if the DBMS is not managed by bexhoma (see the readiness sketch after this script)
+sleep 300
+
+# delete pvc of placeholder
+kubectl delete pvc bexhoma-storage-databaseservice-ycsb-5
+
+sleep 10
+
+
+#### YCSB Persistent Storage (Example-CloudDatabase.md)
+nohup python ycsb.py -ms 2 -tr \
+    -sf 5 \
+    -sfo 10 \
+    --workload a \
+    -dbms DatabaseService \
+    -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK \
+    -tb 16384 \
+    -nlp 8 \
+    -nlt 64 \
+    -nlf 4 \
+    -nbp 1 \
+    -nbt 64 \
+    -nbf 4 \
+    -ne 1 \
+    -nc 1 \
+    -m -mc \
+    -rst shared -rss 1Gi \
+    run $LOG_DIR/doc_ycsb_databaseservice_3.log &
+
+
+#### Wait so that next experiment receives a different code
+#sleep 600
+wait_process "ycsb"
+
+
+###############################################################
+################# Benchbase Database Service ##################
+###############################################################
+
+
+# delete database service placeholder
+kubectl delete deployment bexhoma-deployment-postgres
+kubectl delete svc bexhoma-service
+
+sleep 30
+
+# start database service placeholder
+kubectl create -f k8s/deploymenttemplate-PostgreSQLService.yml
+
+sleep 10
+
+
+# no PVC
+nohup python benchbase.py -ms 2 -tr \
+    -sf 16 \
+    -sd 5 \
+    -dbms DatabaseService \
+    -nbp 1,2 \
+    -nbt 16 \
+    -nbf 16 \
+    -tb 1024 \
+    -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK \
+    run $LOG_DIR/doc_benchbase_databaseservice_1.log &
+
+#### Wait so that next experiment receives a different code
+#sleep 600
+wait_process "benchbase"
+
+# no PVC, skip loading
+nohup python benchbase.py -ms 2 -tr \
+    -sf 16 \
+    -sd 5 \
+    -dbms DatabaseService \
+    -nbp 1,2 \
+    -nbt 16 \
+    -nbf 16 \
+    -tb 1024 \
+    -sl \
+    -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK \
+    run $LOG_DIR/doc_benchbase_databaseservice_2.log &
+
+#### Wait so that next experiment receives a different code
+#sleep 600
+wait_process "benchbase"
+
+
+###############################################################
+################### TPC-H Database Service ####################
+###############################################################
+
+
+# delete database service placeholder
+kubectl delete deployment bexhoma-deployment-postgres
+kubectl delete svc bexhoma-service
+
+sleep 30
+
+# start database service placeholder
+kubectl create -f k8s/deploymenttemplate-PostgreSQLService.yml
+
+sleep 10
+
+
+#### TPC-H Monitoring (Example-CloudDatabase.md)
+# no PVC
+nohup python tpch.py -ms 2 -dt -tr \
+    -dbms DatabaseService \
+    -nlp 8 \
+    -nlt 8 \
+    -sf 3 \
+    -ii -ic -is \
+    -t 1200 \
+    -m -mc \
+    -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK \
+    run $LOG_DIR/doc_tpch_testcase_databaseservice_1.log &
+
+#### Wait so that next experiment receives a different code
+#sleep 600
+wait_process "tpch"
+
+
+#### TPC-H Monitoring (Example-TPC-H.md)
+# no PVC, skip loading
+nohup python tpch.py -ms 2 -dt -tr \
+    -dbms DatabaseService \
+    -nlp 8 \
+    -nlt 8 \
+    -sf 3 \
+    -ii -ic -is \
+    -t 1200 \
+    -m -mc \
+    -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK \
+    -sl \
+    run $LOG_DIR/doc_tpch_testcase_databaseservice_2.log &
+
+#### Wait so that next experiment receives a different code
+#sleep 600
+wait_process "tpch"
+
+
+# delete pvc of placeholder
+kubectl delete pvc bexhoma-storage-databaseservice-tpch-3
+
+sleep 10
+
+# delete database service placeholder
+kubectl delete deployment bexhoma-deployment-postgres
+kubectl delete svc bexhoma-service
+
+sleep 30
+
+# start database service placeholder
+kubectl create -f k8s/deploymenttemplate-PostgreSQLService.yml
+
+sleep 10
+
+# log in to database service placeholder
+# kubectl port-forward svc/bexhoma-service 9091:9091
+
+#### TPC-H Monitoring (Example-TPC-H.md)
+# with PVC, ingestion
+nohup python tpch.py -ms 2 -dt -tr \
+    -dbms DatabaseService \
+    -nlp 8 \
+    -nlt 8 \
+    -sf 3 \
+    -ii -ic -is \
+    -t 1200 \
+    -m -mc \
+    -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK \
+    -rst shared -rss 1Gi \
+    run $LOG_DIR/doc_tpch_testcase_databaseservice_3.log &
+
+#### Wait so that next experiment receives a different code
+#sleep 600
+wait_process "tpch"
+
+
+#### TPC-H Monitoring (Example-TPC-H.md)
+# with PVC, execution only
+nohup python tpch.py -ms 2 -dt -tr \
+    -dbms DatabaseService \
+    -nlp 8 \
+    -nlt 8 \
+    -sf 3 \
+    -ii -ic -is \
+    -t 1200 \
+    -m -mc \
+    -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK \
+    -rst shared -rss 1Gi \
+    run $LOG_DIR/doc_tpch_testcase_databaseservice_4.log &
+
+#### Wait so that next experiment receives a different code
+#sleep 600
+wait_process "tpch"
+
+
+# delete database service placeholder
+kubectl delete deployment bexhoma-deployment-postgres
+kubectl delete svc bexhoma-service
+
+
+###########################################
+############## Clean Folder ###############
+###########################################
+
+
+export MYDIR=$(pwd)
+cd $LOG_DIR
+# remove connection errors from logs
+grep -rl "Warning: Use tokens from the TokenRequest API or manually created secret-based tokens instead of auto-generated secret-based tokens." . | xargs sed -i '/Warning: Use tokens from the TokenRequest API or manually created secret-based tokens instead of auto-generated secret-based tokens./d'
+cd $MYDIR
+
+# Loop over each text file in the source directory
+for file in "$LOG_DIR"/*.log; do
+    # Get the filename without the path and extension
+    echo "Cleaning $file"
+    filename=$(basename "$file" .log)
+    # Extract lines starting from "## Show Summary" and save as _summary.txt in the destination directory
+    awk '/## Show Summary/ {show=1} show {print}' "$file" > "$LOG_DIR/${filename}_summary.txt"
+done
+
+echo "Extraction complete! Files are saved in $LOG_DIR."
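Note on the fixed sleep 300 above: bexhoma does not probe readiness of a DBMS it does not manage, so the script simply waits. A minimal sketch (not part of the patch) of an active probe that could replace the fixed sleep, assuming the placeholder service has been made reachable locally, e.g. via the kubectl port-forward svc/bexhoma-service 9091:9091 shown above:

# Hypothetical alternative to a fixed sleep: poll until the placeholder
# service accepts TCP connections (assumes local reachability, see above).
import socket
import time

def wait_for_tcp(host="localhost", port=9091, timeout=300, interval=10):
    # Return True as soon as host:port accepts a TCP connection,
    # False if the timeout expires first.
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            with socket.create_connection((host, port), timeout=5):
                return True  # service is accepting connections
        except OSError:
            time.sleep(interval)  # not ready yet, retry
    return False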
+
diff --git a/test-docs.sh b/test-docs.sh
index 1c4c8140b..0615953c5 100644
--- a/test-docs.sh
+++ b/test-docs.sh
@@ -502,7 +502,7 @@ wait_process "tpcds"
 
 #### Remove persistent storage
-kubectl delete pvc bexhoma-storage-postgresql-tpcds-1
+kubectl delete pvc bexhoma-storage-monetdb-tpcds-1
 
 sleep 30
 
diff --git a/test-more.sh b/test-more.sh
index 75d1f894b..9cc948d38 100755
--- a/test-more.sh
+++ b/test-more.sh
@@ -325,6 +325,46 @@ wait_process "tpcds"
+
+###########################################
+######### TPC-DS Compare Storage ##########
+###########################################
+
+
+#### TPC-DS Compare with Persistent Storage
+nohup python tpcds.py -ms 4 -dt -tr \
+    -nlp 8 \
+    -nlt 8 \
+    -sf 1 \
+    -t 3600 \
+    -ii -ic -is \
+    -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK \
+    -rst shared -rss 30Gi \
+    run $LOG_DIR/doc_tpcds_testcase_compare_storage.log &
+
+
+#### Wait so that next experiment receives a different code
+wait_process "tpcds"
+
+
+###########################################
+######### TPC-DS Compare at SF=10 #########
+###########################################
+
+
+#### TPC-DS Compare at SF=10
+nohup python tpcds.py -ms 6 -dt -tr \
+    -nlp 8 \
+    -nlt 8 \
+    -sf 10 \
+    -t 3600 \
+    -ii -ic -is \
+    -rnn $BEXHOMA_NODE_SUT -rnl $BEXHOMA_NODE_LOAD -rnb $BEXHOMA_NODE_BENCHMARK \
+    run $LOG_DIR/doc_tpcds_testcase_compare_10.log &
+
+
+#### Wait so that next experiment receives a different code
+wait_process "tpcds"
+
diff --git a/tpch.py b/tpch.py
index 3bbb80f18..101d58953 100644
--- a/tpch.py
+++ b/tpch.py
@@ -37,7 +37,7 @@ parser = argparse.ArgumentParser(description=description)
 parser.add_argument('mode', help='profile the import or run the TPC-H queries', choices=['profiling', 'run', 'start', 'load', 'empty', 'summary'])
 parser.add_argument('-aws', '--aws', help='fix components to node groups at AWS', action='store_true', default=False)
- parser.add_argument('-dbms','--dbms', help='DBMS', choices=['PostgreSQL', 'MonetDB', 'MySQL', 'MariaDB'], default=[], action='append')
+ parser.add_argument('-dbms','--dbms', help='DBMS', choices=['PostgreSQL', 'MonetDB', 'MySQL', 'MariaDB', 'DatabaseService'], default=[], action='append')
 parser.add_argument('-lit', '--limit-import-table', help='limit import to one table, name of this table', default='')
 parser.add_argument('-db', '--debug', help='dump debug informations', action='store_true')
 parser.add_argument('-sl', '--skip-loading', help='do not ingest, start benchmarking immediately', action='store_true', default=False)
@@ -187,6 +187,8 @@ config.set_storage(
 storageConfiguration = 'postgresql'
 )
+ if skip_loading:
+ config.loading_deactivated = True
 config.jobtemplate_loading = "jobtemplate-loading-tpch-PostgreSQL.yml"
 config.set_loading_parameters(
 SF = SF,
@@ -213,6 +215,8 @@ config.set_storage(
 storageConfiguration = 'monetdb'
 )
+ if skip_loading:
+ config.loading_deactivated = True
 config.jobtemplate_loading = "jobtemplate-loading-tpch-MonetDB.yml"
 config.set_loading_parameters(
 SF = SF,
@@ -239,6 +243,8 @@ config.set_storage(
 storageConfiguration = 'mariadb'
 )
+ if skip_loading:
+ config.loading_deactivated = True
 config.jobtemplate_loading = "jobtemplate-loading-tpch-MariaDB.yml"
 config.set_loading_parameters(
 SF = SF,
@@ -268,6 +274,8 @@ config.set_storage(
 storageConfiguration = 'mysql'
 )
+ if skip_loading:
+ config.loading_deactivated = True
 config.jobtemplate_loading = "jobtemplate-loading-tpch-MySQL.yml"
 config.set_loading_parameters(
 SF = SF,
@@ -289,6 +297,36 @@
 DBMSBENCHMARKER_DEV = debugging,
 )
 config.set_loading(parallel=split_portion, num_pods=loading_pods_total)
+ if ("DatabaseService" in args.dbms):# or len(args.dbms) == 0): # not included per default
+ # DatabaseService
+ name_format = 'DatabaseService-{cluster}-{pods}'
+ config = configurations.default(experiment=experiment, docker='DatabaseService', configuration=name_format.format(cluster=cluster_name, pods=loading_pods_total, split=split_portion), dialect='PostgreSQL', alias='DBMS A1')
+ config.monitoring_sut = False # cannot be monitored since outside of K8s
+ if skip_loading:
+ config.loading_deactivated = True
+ config.set_storage(
+ storageConfiguration = 'databaseservice'
+ )
+ config.jobtemplate_loading = "jobtemplate-loading-tpch-PostgreSQL.yml"
+ config.set_loading_parameters(
+ SF = SF,
+ PODS_TOTAL = str(loading_pods_total),
+ PODS_PARALLEL = str(split_portion),
+ STORE_RAW_DATA = 1,
+ STORE_RAW_DATA_RECREATE = 0,
+ BEXHOMA_SYNCH_LOAD = 1,
+ BEXHOMA_SYNCH_GENERATE = 1,
+ TRANSFORM_RAW_DATA = 1,
+ TPCH_TABLE = limit_import_table,
+ BEXHOMA_HOST = 'bexhoma-service',
+ )
+ config.set_benchmarking_parameters(
+ SF = SF,
+ DBMSBENCHMARKER_RECREATE_PARAMETER = recreate_parameter,
+ DBMSBENCHMARKER_SHUFFLE_QUERIES = shuffle_queries,
+ DBMSBENCHMARKER_DEV = debugging,
+ )
+ config.set_loading(parallel=split_portion, num_pods=loading_pods_total)
 ##############
 ### wait for necessary nodegroups to have planned size
 ##############
diff --git a/ycsb.py b/ycsb.py
index de5fe36cc..9ccf262e3 100644
--- a/ycsb.py
+++ b/ycsb.py
@@ -43,7 +43,7 @@ parser = argparse.ArgumentParser(description=description)
 parser.add_argument('mode', help='import YCSB data or run YCSB queries', choices=['run', 'start', 'load', 'summary'], default='run')
 parser.add_argument('-aws', '--aws', help='fix components to node groups at AWS', action='store_true', default=False)
- parser.add_argument('-dbms','--dbms', help='DBMS to load the data', choices=['PostgreSQL', 'MySQL', 'MariaDB', 'YugabyteDB', 'CockroachDB'], default=[], action='append')
+ parser.add_argument('-dbms','--dbms', help='DBMS to load the data', choices=['PostgreSQL', 'MySQL', 'MariaDB', 'YugabyteDB', 'CockroachDB', 'DatabaseService'], default=[], action='append')
 parser.add_argument('-db', '--debug', help='dump debug informations', action='store_true')
 parser.add_argument('-sl', '--skip-loading', help='do not ingest, start benchmarking immediately', action='store_true', default=False)
 parser.add_argument('-cx', '--context', help='context of Kubernetes (for a multi cluster environment), default is current context', default=None)
@@ -364,23 +364,27 @@ if skip_loading:
 config.loading_deactivated = True
 config.sut_service_name = "yb-tserver-service" # fix service name of SUT, because it is not managed by bexhoma
- config.sut_container_name = "yb-tserver" # fix container name of SUT
+ config.sut_container_name = '' # fix container name of SUT
 def get_worker_pods(self):
 """
 Returns a list of all pod names of workers for the current SUT.
 Default is component name is 'worker' for a bexhoma managed DBMS.
+ This is used, for example, to find the pods of the workers in order to get the host info (CPU, RAM, node name, ...).
 YugabyteDB: This is yb-tserver-0, -1 etc.
 :return: list of endpoints
 """
- pods_worker = self.experiment.cluster.get_pods(component='worker', configuration=self.configuration, experiment=self.code)
+ pods_worker = ['yb-tserver-0', 'yb-tserver-1', 'yb-tserver-2']
+ #pods_worker = self.experiment.cluster.get_pods(app='', component='', configuration='yb-tserver', experiment='')
+ #print("****************", pods_worker)
 return pods_worker
- #config.get_worker_pods = types.MethodType(get_worker_pods, config)
+ config.get_worker_pods = types.MethodType(get_worker_pods, config)
 def create_monitoring(self, app='', component='monitoring', experiment='', configuration=''):
 """
 Generate a name for the monitoring component.
+ This is used in a pattern for promql. Basically this is `{app}-{component}-{configuration}-{experiment}-{client}`.
- For YugabyteDB, the service to be monitored is named like 'yb-tserver-'.
+ For YugabyteDB, the service of the SUT to be monitored is named like 'yb-tserver-'.
 :param app: app the component belongs to
 :param component: Component, for example sut or monitoring
@@ -399,21 +403,29 @@ def get_worker_endpoints(self):
 """
 Returns all endpoints of a headless service that monitors nodes of a distributed DBMS.
 These are IPs of cAdvisor instances.
 The endpoint list is to be filled in a config of an instance of Prometheus.
- For YugabyteDB the service is fixed to be 'bexhoma-service-monitoring-default' and does not depend on the experiment.
+ By default, the workers can be found by the name of their component (worker-0 etc.).
+ This is necessary when we have sidecar containers attached to workers of a distributed DBMS.
 :return: list of endpoints
 """
- endpoints = self.experiment.cluster.get_service_endpoints(service_name="bexhoma-service-monitoring-default")
+ endpoints = []
+ #name_worker = self.generate_component_name(component='worker', configuration=self.configuration, experiment=self.code)
+ pods_worker = self.get_worker_pods()
+ for pod in pods_worker:
+ #endpoint = '{worker}.{service_sut}'.format(worker=pod, service_sut=name_worker)
+ endpoint = '{worker}'.format(worker=pod)
+ endpoints.append(endpoint)
+ print('Worker Endpoint: {endpoint}'.format(endpoint = endpoint))
 self.logger.debug("yugabytedb.get_worker_endpoints({})".format(endpoints))
 return endpoints
- #config.get_worker_endpoints = types.MethodType(get_worker_endpoints, config)
+ config.get_worker_endpoints = types.MethodType(get_worker_endpoints, config)
 def set_metric_of_config(self, metric, host, gpuid):
 """
 Returns a promql query.
 Parameters in this query are substituted, so that prometheus finds the correct metric.
 Example: In 'sum(irate(container_cpu_usage_seconds_total{{container_label_io_kubernetes_pod_name=~"(.*){configuration}-{experiment}(.*)", container_label_io_kubernetes_pod_name=~"(.*){configuration}-{experiment}(.*)", container_label_io_kubernetes_container_name="dbms"}}[1m]))' configuration and experiment are placeholders and will be replaced by concrete values.
- Here: We do not have a SUT that is specific to the experiment or configuration.
+ YugabyteDB: We do not have a SUT that is specific to the experiment or configuration. The pod names follow a pattern like yb-tserver and there is no container name.
:param metric: Parametrized promql query :param host: Name of the host the metrics should be collected from @@ -421,6 +433,7 @@ def set_metric_of_config(self, metric, host, gpuid): :return: promql query without parameters """ metric = metric.replace(', container="dbms"', '') + metric = metric.replace(', container_label_io_kubernetes_container_name="dbms"', '') return metric.format(host=host, gpuid=gpuid, configuration='yb-tserver', experiment='') config.set_metric_of_config = types.MethodType(set_metric_of_config, config) config.set_loading_parameters( @@ -528,6 +541,55 @@ def set_metric_of_config(self, metric, host, gpuid): #print(executor_list) config.add_benchmark_list(executor_list) cluster.max_sut = 1 # can only run 1 in same cluster because of fixed service + if ("DatabaseService" in args.dbms):# or len(args.dbms) == 0): # not included per default + # DatabaseService + name_format = 'DatabaseService-{threads}-{pods}-{target}' + config = configurations.ycsb(experiment=experiment, docker='DatabaseService', configuration=name_format.format(threads=loading_threads, pods=loading_pods, target=loading_target), alias='DatabaseService') + config.monitoring_sut = False # cannot be monitored since outside of K8s + config.set_storage( + storageConfiguration = 'databaseservice' + ) + if skip_loading: + config.loading_deactivated = True + config.set_loading_parameters( + PARALLEL = str(loading_pods), + SF = SF, + BEXHOMA_SYNCH_LOAD = 1, + YCSB_THREADCOUNT = loading_threads_per_pod, + YCSB_TARGET = loading_target_per_pod, + YCSB_STATUS = 1, + YCSB_WORKLOAD = args.workload, + YCSB_ROWS = ycsb_rows, + YCSB_OPERATIONS = ycsb_operations_per_pod, + YCSB_BATCHSIZE = batchsize, + ) + config.set_loading(parallel=loading_pods, num_pods=loading_pods) + executor_list = [] + for factor_benchmarking in num_benchmarking_target_factors: + benchmarking_target = target_base*factor_benchmarking + for benchmarking_threads in num_benchmarking_threads: + for benchmarking_pods in num_benchmarking_pods: + for num_executor in list_clients: + benchmarking_pods_scaled = num_executor*benchmarking_pods + benchmarking_threads_per_pod = int(benchmarking_threads/benchmarking_pods) + ycsb_operations_per_pod = int(ycsb_operations/benchmarking_pods_scaled) + benchmarking_target_per_pod = int(benchmarking_target/benchmarking_pods) + executor_list.append(benchmarking_pods_scaled) + config.add_benchmarking_parameters( + PARALLEL = str(num_executor*benchmarking_pods), + SF = SF, + BEXHOMA_SYNCH_LOAD = 1, + YCSB_THREADCOUNT = benchmarking_threads_per_pod, + YCSB_TARGET = benchmarking_target_per_pod, + YCSB_STATUS = 1, + YCSB_WORKLOAD = args.workload, + YCSB_ROWS = ycsb_rows, + YCSB_OPERATIONS = ycsb_operations_per_pod, + YCSB_BATCHSIZE = batchsize, + ) + #print(executor_list) + config.add_benchmark_list(executor_list) + #cluster.max_sut = 1 # can only run 1 in same cluster because of fixed service ############## ### wait for necessary nodegroups to have planned size ##############
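For illustration, the effect of the YugabyteDB-specific set_metric_of_config above can be reproduced standalone: the container filter is stripped (the tserver pods have no container named "dbms") and the placeholders are bound to the fixed pod name pattern. A minimal sketch; the template is abbreviated from the docstring example and is not part of the patch itself:

# Standalone sketch of the promql substitution performed above
# (template abbreviated from the docstring example).
template = ('sum(irate(container_cpu_usage_seconds_total{{'
            'container_label_io_kubernetes_pod_name=~"(.*){configuration}-{experiment}(.*)"'
            ', container_label_io_kubernetes_container_name="dbms"}}[1m]))')
# Strip the container filter, since there is no container name for the tservers.
metric = template.replace(', container_label_io_kubernetes_container_name="dbms"', '')
# Bind the placeholders to the fixed pod name pattern of the tservers.
print(metric.format(configuration='yb-tserver', experiment='', host='', gpuid=''))
# -> sum(irate(container_cpu_usage_seconds_total{container_label_io_kubernetes_pod_name=~"(.*)yb-tserver-(.*)"}[1m]))

The resulting pattern "(.*)yb-tserver-(.*)" is the same one that appears in the example metrics printed in the logs above.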