Moving grafana link script (#679)
* chaos es scripts

* adding requirements

* numpy

* adding index update

* adding grafana link

* adding grafana link

* taking out script graphana link

* taking out prints

* updating kube burner data sources

* updating index data frame

* only try to find workload in intlab if not intlab to begin with

* adding env index from variable

* adding helper print lines
paigerube14 authored Jan 13, 2025
1 parent 0e14df6 commit 44384f5
Showing 8 changed files with 298 additions and 12 deletions.
Empty file added __init__.py
Empty file added es_scripts/__init__.py
21 changes: 21 additions & 0 deletions es_scripts/chaos_update.py
@@ -0,0 +1,21 @@
import update_es_uuid
import json

# candidate document ids / search params; the later assignments override the earlier ones
ids = ["XStsmZEBPJqRxZ0XWz26", "Wyv-kJEBPJqRxZ0XaD35"]
ids = ["zO95D5EBjXIeP7FHSwnd", "Nip2D5EBPJqRxZ0XAFt9"]
index = "krkn-telemetry"
params = {"_id": "XStsmZEBPJqRxZ0XWz26"}

params = {"run_uuid": 'fd1984a4-97da-4ce7-9a28-95b7a8cc8cf9'}
#es_search = update_es_uuid.es_search(params, index=index)[0]


# update_es_uuid.delete_key(ids[0], index, "cluster_version")
# update_es_uuid.delete_key(ids[1], index, "cluster_version")
# del es_search['_source']["cluster_version"]
# print('es search 1' + str(es_search))
# update_es_uuid.update_data_to_elasticsearch(es_search['_id'], es_search, index)

# re-upload a document that was saved to run.json (and possibly edited by hand)
with open("run.json", "r+") as f:
    es_search = json.loads(f.read())

update_es_uuid.upload_data_to_elasticsearch(es_search, index)
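The commented-out lines above hint at the intended round trip: pull a document down, save it locally, edit it, then re-upload. A minimal sketch of the first half of that flow, assuming the update_es_uuid helpers shown later in this commit and a hypothetical document id:

# sketch only: "hypothetical-id" is a placeholder, not a real document id
import json
import update_es_uuid

index = "krkn-telemetry"
doc = update_es_uuid.es_search({"_id": "hypothetical-id"}, index=index)[0]

with open("run.json", "w") as f:
    json.dump(doc["_source"], f, indent=2)  # save locally, edit by hand,
                                            # then run chaos_update.py to re-upload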
9 changes: 9 additions & 0 deletions es_scripts/requirements.txt
@@ -0,0 +1,9 @@
argparse>=1.4.0
requests>=2.25.1
pyyaml>=5.4.1
python-jenkins>=1.7.0
numpy==1.24.0
elasticsearch<7.14.0
coloredlogs>=15.0.1
utils
urllib3
48 changes: 48 additions & 0 deletions es_scripts/update_cluster_verison.py
@@ -0,0 +1,48 @@
import update_es_uuid
import time


index = "krkn-telemetry"

params = {'cloud_infrastructure': 'AWS'}

# match documents that are missing a cluster_version field
must_not = {"field": "cluster_version"}

# equivalent query DSL:
# "must_not": [
#     {
#         "exists": {
#             "field": "cluster_version"
#         }
#     }
# ]

i = 0
size = 60
from_pos = 0
while i < 20:
    print('from pos ' + str(from_pos))
    es_search_all = update_es_uuid.es_search(params, must_not=must_not, index=index, size=size, from_pos=from_pos)
    #print('search all' + str(es_search_all))
    for es_search in es_search_all:

        if "cluster_version" in es_search["_source"]:
            print('continue')
            continue
        os_version = es_search["_source"]['node_summary_infos'][0]["os_version"]
        # e.g. "Red Hat Enterprise Linux CoreOS 417.94.202410180656-0"

        # e.g. 418.94.202409062250-0
        numbers = os_version.split(' ')[-1]
        if "Plow" in numbers:
            numbers = os_version.split(' ')[-2]
        numbers = numbers.split(".")

        print('numbers ' + str(os_version))
        # target format: 4.17.0-0.nightly-2024-10-21-185738
        version = numbers[0][0] + '.' + numbers[0][1:] + ".0-0.nightly-" + numbers[2][:4] + "-" + numbers[2][4:6] + "-" + numbers[2][6:8] + "-" + numbers[2][8:12] + numbers[2][13:14]
        print('version: ' + str(es_search['_id']) + " " + str(version))
        data_to_update = {'cluster_version': version}
        update_es_uuid.update_data_to_elasticsearch(es_search['_id'], data_to_update, index)
        time.sleep(2)
    i += 1
    from_pos = size * i
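The slicing above is dense, so here is a worked example of the same transformation using the sample os_version from the comment (the trailing [13:14] picks up the digit after the final dash):

# worked example of the version reconstruction above (sample input only)
os_version = "Red Hat Enterprise Linux CoreOS 417.94.202410180656-0"

numbers = os_version.split(' ')[-1]   # "417.94.202410180656-0"
parts = numbers.split(".")            # ["417", "94", "202410180656-0"]

stamp = parts[2]                      # "202410180656-0"
version = (parts[0][0] + '.' + parts[0][1:] + ".0-0.nightly-"
           + stamp[:4] + "-" + stamp[4:6] + "-" + stamp[6:8]
           + "-" + stamp[8:12] + stamp[13:14])
print(version)                        # 4.17.0-0.nightly-2024-10-18-06560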
71 changes: 71 additions & 0 deletions es_scripts/update_es_index.py
@@ -0,0 +1,71 @@
import os
import time
from elasticsearch import Elasticsearch
import update_es_uuid


# elasticsearch constants
ES_URL = 'search-ocp-qe-perf-scale-test-elk-hcm7wtsqpxy7xogbu72bor4uve.us-east-1.es.amazonaws.com'
ES_USERNAME = os.getenv('ES_USERNAME')
ES_PASSWORD = os.getenv('ES_PASSWORD')


def update_data_to_elasticsearch(params, index, new_index):
    ''' moves documents matching params from index to new_index,
    skipping uuids that already exist in new_index
    '''

    start = time.time()
    matched_docs = update_es_uuid.es_search(params, index=index, size=30, from_pos=0)

    print('doc length ' + str(len(matched_docs)))
    for item in matched_docs:
        param_uuid = {"uuid": item['_source']['uuid']}
        found_uuid = update_es_uuid.es_search(param_uuid, index=new_index)
        print('uuid ' + str(item))
        if len(found_uuid) == 0:
            print('find uuid ' + str(found_uuid))
            response = upload_data_to_elasticsearch(item["_source"], new_index)
            print(f"Response back was {response}")
            #break
            update_es_uuid.delete_es_entry(item['_id'], index)

    end = time.time()
    elapsed_time = end - start

    # return elapsed time for upload if no issues
    return elapsed_time

def upload_data_to_elasticsearch(item, index):
    ''' uploads captured data in RESULTS dictionary to Elasticsearch
    '''

    # create Elasticsearch object and attempt index
    es = Elasticsearch(
        [f'https://{ES_USERNAME}:{ES_PASSWORD}@{ES_URL}:443']
    )

    start = time.time()
    print(f"Uploading item {item} to index {index} in Elasticsearch")
    response = es.index(
        index=index,
        body=item
    )
    print(f"Response back was {response}")

    end = time.time()
    elapsed_time = end - start

    # return elapsed time for upload if no issues
    return elapsed_time

# to_update = {"profile": "IPI-on-AWS.install.yaml"}
# update_data_to_elasticsearch("2l41vYYBRpj_T8Zagru2", to_update)
# update_data_to_elasticsearch("4F41vYYBRpj_T8ZahLvF", to_update)
# update_data_to_elasticsearch("7F41vYYBRpj_T8ZairsN", to_update)
# update_data_to_elasticsearch("5F41vYYBRpj_T8Zahrtk", to_update)


params = {"workload": 'ovn-live-migration'}
new_index = "ovn-live-migration"
old_index = "ripsaw-kube-burner-000020"
update_data_to_elasticsearch(params, old_index, new_index)
#delete_es_entry("5F41vYYBRpj_T8Zahrtk")
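The migration above fetches a single page of size=30; if a workload has more matching documents, a paging loop in the style of update_cluster_verison.py could drive the same logic. A sketch, assuming only the helper signatures shown in this commit:

# sketch: page through every match instead of the single size=30 page above
import update_es_uuid

params = {"workload": 'ovn-live-migration'}
size, page = 30, 0
while True:
    hits = update_es_uuid.es_search(params, index="ripsaw-kube-burner-000020",
                                    size=size, from_pos=page * size)
    if not hits:
        break
    for hit in hits:
        print(hit['_id'])   # each document the migration loop would consider
    page += 1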
52 changes: 40 additions & 12 deletions es_scripts/update_es_uuid.py
@@ -1,19 +1,33 @@
 import os
 import time
 from elasticsearch import Elasticsearch
+import urllib3


 # elasticsearch constants
-ES_URL = 'search-ocp-qe-perf-scale-test-elk-hcm7wtsqpxy7xogbu72bor4uve.us-east-1.es.amazonaws.com'
-ES_USERNAME = os.getenv('ES_USERNAME')
-ES_PASSWORD = os.getenv('ES_PASSWORD')
+ES_URL = os.environ.get('ES_URL', 'search-ocp-qe-perf-scale-test-elk-hcm7wtsqpxy7xogbu72bor4uve.us-east-1.es.amazonaws.com')
+ES_USERNAME = os.environ.get('ES_USERNAME')
+ES_PASSWORD = os.environ.get('ES_PASSWORD')


+def es_search_url(params, wildcard="", should="", must_not="", index='perfscale-jenkins-metadata', size=10, from_pos=0, es_url="", es_user="", es_pass=""):
+    global ES_USERNAME, ES_URL, ES_PASSWORD
+    ES_USERNAME = es_user
+    ES_URL = es_url
+    ES_PASSWORD = es_pass
+    return es_search(params, wildcard, should, must_not, index, size, from_pos)
+
 def es_search(params, wildcard="", should="", must_not="", index='perfscale-jenkins-metadata', size=10, from_pos=0):
-    # create Elasticsearch object and attempt index
+    urllib3.disable_warnings()
+    urllib3.logging.captureWarnings(False)
+    # create Elasticsearch object and attempt index
+    global ES_URL
+    if "http" in ES_URL:
+        ES_URL = ES_URL.split('//')[1]
     es = Elasticsearch(
-        [f'https://{ES_USERNAME}:{ES_PASSWORD}@{ES_URL}:443']
+        [f'https://{ES_USERNAME}:{ES_PASSWORD}@{ES_URL}'], verify_certs=False, use_ssl=True
     )

     filter_data = []
     filter_data.append({
         "match_all": {}
@@ -54,22 +68,36 @@ def es_search(params, wildcard="", should="", must_not="", index='perfscale-jenkins-metadata', size=10, from_pos=0):
         must_not_list_data.append(must_not_data)
     #print('must not' + str(must_not))
     #print("filter_data " + str(filter_data))
-    search_result = es.search(index=index, body={"query": {"bool": {"filter": filter_data}}, "size": size, "from": from_pos})
-    #print('serach resu.t' + str(search_result))
+    try:
+        search_result = es.search(index=index, body={"query": {"bool": {"filter": filter_data}}, "size": size, "from": from_pos})
+    except Exception as e:
+        print('exception ' + str(e))
     hits = []
     if "hits" in search_result.keys() and "hits" in search_result['hits'].keys():
         return search_result['hits']['hits']

     return hits

-def delete_es_entry(id):
+def delete_es_entry(id, index='perfscale-jenkins-metadata'):
     # create Elasticsearch object and attempt index
     es = Elasticsearch(
         [f'https://{ES_USERNAME}:{ES_PASSWORD}@{ES_URL}:443']
     )

-    index = 'perfscale-jenkins-metadata'
-
     es.delete(index=index, doc_type='_doc', id=id)

+def delete_key(id, index, key_to_delete):
+
+    es = Elasticsearch(
+        [f'https://{ES_USERNAME}:{ES_PASSWORD}@{ES_URL}:443']
+    )
+
+    es.update(
+        index=index,
+        id=id,
+        body={"script": f"ctx._source.remove('{key_to_delete}')"}
+    )
+
 def update_data_to_elasticsearch(id, data_to_update, index='perfscale-jenkins-metadata'):
     ''' updates captured data in RESULTS dictionary to Elasticsearch
     '''
@@ -82,7 +110,7 @@ def update_data_to_elasticsearch(id, data_to_update, index='perfscale-jenkins-metadata'):
     start = time.time()

     doc = es.get(index=index, doc_type='_doc', id=id)
-    ##print('doc '+ str(doc))
+    #print('doc '+ str(doc))
     for k, v in data_to_update.items():
         doc['_source'][k] = v
     es.update(index=index, doc_type='_doc', id=id, body={"doc": doc['_source']
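For reference, the new delete_key helper and the widened delete_es_entry / update_data_to_elasticsearch signatures are exercised like this (a sketch with a hypothetical document id; compare the commented-out calls in chaos_update.py):

# sketch only: "hypothetical-id" stands in for a real document id
import update_es_uuid

index = "krkn-telemetry"
update_es_uuid.delete_key("hypothetical-id", index, "cluster_version")
update_es_uuid.update_data_to_elasticsearch("hypothetical-id",
                                            {"cluster_version": "4.17.0"},
                                            index)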
109 changes: 109 additions & 0 deletions get_graphana_link.py
@@ -0,0 +1,109 @@
#!/usr/bin/env python3

import os
from es_scripts import update_es_uuid


def find_workload_type(current_run_uuid):
    search_params = {
        "uuid": current_run_uuid
    }

    index = os.getenv("es_metadata_index")

    hits = update_es_uuid.es_search(search_params, index=index)
    print('hits ' + str(hits))
    if len(hits) <= 0:
        #print('else')
        workload_type = find_workload_type_sub(current_run_uuid)
        print('workload type ' + str(workload_type))
        if workload_type == "Unknown" and "intlab" not in os.environ.get("ES_URL"):

            es_metadata_index = "ospst-perf-scale-ci*"
            if os.getenv("ES_USERNAME_INTERNAL") is not None and os.getenv("ES_PASSWORD_INTERNAL") is not None:
                os.environ["ES_USERNAME"] = os.getenv("ES_USERNAME_INTERNAL", None)
                os.environ["ES_PASSWORD"] = os.getenv("ES_PASSWORD_INTERNAL", None)
                # try finding in internal es
                ES_URL = os.environ["ES_URL"] = "https://opensearch.app.intlab.redhat.com"

                hits = update_es_uuid.es_search_url(search_params, es_url=ES_URL, es_pass=os.getenv("ES_PASSWORD_INTERNAL"), es_user=os.getenv("ES_USERNAME_INTERNAL"), index=es_metadata_index)
                print('hits ' + str(hits))
            else:
                print("internal username and password not set")

    if len(hits) == 0:
        print("No data entry was found for that UUID")
        return "Not Found"
    return hits[0]['_source']


def find_workload_type_sub(current_run_uuid):
    search_params = {
        "uuid": current_run_uuid
    }

    if "intlab" in os.environ.get("ES_URL"):
        workload_index_map = {"kube-burner": "ospst-ripsaw-kube-burner*", "ingress-perf": "ospst-ingress-perf*", "network-perf-v2": "ospst-k8s-netperf*"}
    else:
        workload_index_map = {"kube-burner": "ripsaw-kube-burner*", "ingress-perf": "ingress-perf*", "network-perf-v2": "k8s-netperf*", "router-perf": "router-test-results"}
    for k, v in workload_index_map.items():
        hits = update_es_uuid.es_search(search_params, index=v)
        print('hits extra ' + str(hits))
        if len(hits) > 0:
            return k
    return "Unknown"


def get_graphana():

    baseline_uuid = os.environ.get("BASELINE_UUID")

    uuid = os.environ.get("UUID")
    workload_details = find_workload_type(uuid)
    if workload_details != "Not Found":
        workload = workload_details["benchmark"]
        uuid_str = "&var-uuid=" + uuid
        baseline_workload_details = []
        if baseline_uuid != "" and baseline_uuid is not None:
            for baseline in baseline_uuid.split(","):
                uuid_str += "&var-uuid=" + baseline
                baseline_workload_details.append(find_workload_type(baseline))


        worker_count = f"&var-workerNodesCount={workload_details['workerNodesCount']}"
        # data source for public dev es
        # might want to be able to loop through multiple baseline uuids if more than one is passed
        major_version = "&var-ocpMajorVersion=" + str(workload_details['releaseStream'][:4])
        for baseline_details in baseline_workload_details:
            if baseline_details['releaseStream'][:4] not in major_version:
                major_version += "&var-ocpMajorVersion=" + str(baseline_details['releaseStream'][:4])
        grafana_url_ending = f"{worker_count}&from=now-1y&to=now&var-platform=AWS&var-platform=Azure&var-platform=GCP&var-platform=IBMCloud&var-platform=AlibabaCloud&var-platform=VSphere&var-platform=rosa&var-clusterType=rosa&var-clusterType=self-managed"
        if workload == "ingress-perf":
            if "intlab" in os.environ.get("ES_URL"):
                data_source = "be0f4aff-4122-43cf-95dd-fd51c012a208"
            else:
                data_source = "beefdfd9-800e-430c-afef-383032aa2d1f"

            grafana_url_ending += f"&var-infraNodesType={workload_details['infraNodesType']}"
            print(f"grafana url https://grafana.rdu2.scalelab.redhat.com:3000/d/d6105ff8-bc26-4d64-951e-56da771b703d/ingress-perf?orgId=1&var-datasource=beefdfd9-800e-430c-afef-383032aa2d1f&var-Datasource={data_source}{uuid_str}=&var-termination=edge&var-termination=http&var-termination=passthrough&var-termination=reencrypt&var-latency_metric=avg_lat_us&var-compare_by=uuid.keyword{major_version}{grafana_url_ending}")
            print(f"grafana report mode link: https://grafana.rdu2.scalelab.redhat.com:3000/d/df906760-b4c0-44cc-9ecb-586cf39f9bab/ingress-perf-v2-report-mode?orgId=1&var-datasource={data_source}{uuid_str}&var-ocpMajorVersion=All&var-uuid=&var-termination=All&var-latency_metric=avg_lat_us&var-compare_by=ocpMajorVersion.keyword&var-all_uuids=All{grafana_url_ending}")
        elif workload == "k8s-netperf" or workload == "network-perf-v2":

            if "intlab" in os.environ.get("ES_URL"):
                data_source = 'abc72863-3b49-47d5-98d1-357a9559afea'
            else:
                data_source = "rKPTw9UVz"
            print(f"grafana url https://grafana.rdu2.scalelab.redhat.com:3000/d/wINGhybVz/k8s-netperf?orgId=1&var-datasource={data_source}{uuid_str}&var-termination=edge&var-termination=http&var-termination=passthrough&var-termination=reencrypt&var-latency_metric=avg_lat_us&var-compare_by=uuid.keyword&var-workerNodesCount=9&from=now-1y&to=now&var-platform=All&var-clusterType=rosa&var-clusterType=self-managed&var-workerNodesType=All&var-hostNetwork=All&var-service=All&var-parallelism=All&var-throughput_profile=All&var-latency_profile=All&var-messageSize=All&var-driver=netperf")
        else:
            if "intlab" in os.environ.get("ES_URL"):
                data_source = "ab3f14e6-a50f-4d52-93fa-a5076794f864"
            else:
                data_source = "QzcDu7T4z"
            print(f"grafana url https://grafana.rdu2.scalelab.redhat.com:3000/d/g4dJlkBnz3/kube-burner-compare?orgId=1&var-Datasource={data_source}&var-sdn=OVNKubernetes&var-workload={workload}&var-latencyPercentile=P99&var-condition=Ready&var-component=kube-apiserver{uuid_str}{grafana_url_ending}")

            print(f"grafana report mode link: https://grafana.rdu2.scalelab.redhat.com:3000/d/D5E8c5XVz/kube-burner-report-mode?orgId=1&var-Datasource={data_source}&var-sdn=OVNKubernetes&var-clusterType=rosa&var-clusterType=self-managed&var-job={workload}{major_version}&var-compare_by=metadata.ocpMajorVersion&var-component=kube-apiserver&var-component=kube-controller-manager&var-node_roles=masters&var-node_roles=workers&var-node_roles=infra&to=now{uuid_str}{grafana_url_ending}")


get_graphana()
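The script is driven entirely by environment variables and calls get_graphana() at module level, so it runs on import. A minimal sketch of invoking it from another Python process (every value below is a hypothetical placeholder):

# sketch only: host, index, and uuids are placeholders, not real endpoints
import os

os.environ["ES_URL"] = "https://search.example.com"          # hypothetical host
os.environ["es_metadata_index"] = "perf_scale_ci*"           # hypothetical index
os.environ["UUID"] = "fd1984a4-97da-4ce7-9a28-95b7a8cc8cf9"  # run to look up
os.environ["BASELINE_UUID"] = ""                             # optional baselines

import get_graphana_link   # module-level get_graphana() call prints the links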
