-
Notifications
You must be signed in to change notification settings - Fork 25
/
Copy pathgunicorn_config.py
114 lines (91 loc) · 4.11 KB
/
gunicorn_config.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
import os

from notifications_utils.gunicorn_defaults import set_gunicorn_defaults

# Populate this module's namespace with the shared gunicorn defaults;
# the assignments below then override individual settings.
set_gunicorn_defaults(globals())
# importing child_exit from gds_metrics.gunicorn has the side effect of eagerly importing
# prometheus_client, flask, werkzeug and more, which is a bad idea to do before eventlet
# has done its monkeypatching. use a nested import for the rare cases child_exit is actually
# called instead.
def child_exit(server, worker):
    """Gunicorn master hook, called after a worker process exits.

    Tells prometheus multiprocess mode that the worker's pid is dead so
    its per-process metric state can be cleaned up. The import is kept
    inside the function deliberately (see module comment above): pulling
    in prometheus_client before eventlet monkeypatching is unsafe.
    """
    from prometheus_client import multiprocess as _multiprocess

    _multiprocess.mark_process_dead(worker.pid)
# Overrides on top of the shared defaults applied above.
workers = 4
worker_class = "eventlet"
worker_connections = 8  # limit runaway greenthread creation
statsd_host = f"{os.getenv('STATSD_HOST')}:8125"
keepalive = 0  # disable temporarily for diagnosing issues
timeout = int(os.getenv("HTTP_SERVE_TIMEOUT_SECONDS", 30))  # though has little effect with eventlet worker_class

# When set, slow requests trigger the diagnostic logging in post_request below.
debug_post_threshold = os.getenv("NOTIFY_GUNICORN_DEBUG_POST_REQUEST_LOG_THRESHOLD_SECONDS")
if debug_post_threshold:
    debug_post_threshold_float = float(debug_post_threshold)
profiler = None
def pre_request(worker, req):
    """Gunicorn hook: stamp the request with its start time.

    os.times().elapsed is used rather than the time module to avoid any
    additional imports before eventlet has monkeypatched the stdlib.
    """
    setattr(req, "_pre_request_elapsed", os.times().elapsed)
def _process_item(process, attr, process_times_before):
value = process.info[attr]
if attr == "cpu_times":
if process.pid in process_times_before:
value = [
after - before
for before, after in zip(process_times_before[process.pid], process.info["cpu_times"], strict=False)
]
else:
# we have no way of giving a sensible value
value = None
if isinstance(value, tuple):
# convert to list for more compact (and json-able) representation
return list(value)
return value
def post_request(worker, req, environ, resp):
    # Gunicorn hook run after each request: when the request took longer
    # than the configured threshold, log one record of system diagnostics
    # (per-process CPU deltas and memory via psutil, plus a short eventlet
    # profile) to help explain the slowness.
    elapsed = os.times().elapsed - req._pre_request_elapsed
    if elapsed > debug_post_threshold_float:
        # nested imports: only paid on this rare slow path, and these
        # modules must not be imported before eventlet monkeypatching
        # (see module comment near the top of the file)
        import json
        import time
        from io import StringIO
        from os import getpid
        from pstats import Stats

        import psutil
        from eventlet.green import profile

        # include cpu_percent to give later call a "start time" to work with
        process_times_before = {
            p.pid: p.info["cpu_times"] for p in psutil.process_iter(["cpu_percent", "cpu_times"])
        }

        # one module-level profiler, created lazily and reused across requests
        global profiler
        if profiler is None:
            profiler = profile.Profile()

        # if a concurrent handler is already profiling, don't start a second run
        should_profile = not getattr(profiler, "running", False)

        if should_profile:
            profiler.start()

        perf_counter_before = time.perf_counter()
        time.sleep(0.1)  # period over which to profile and calculate cpu_percent
        perf_counter_after = time.perf_counter()

        if should_profile:
            profiler.stop()

            # render the profile to a string, restricted to the top 10% of entries
            prof_out_sio = StringIO()
            s = Stats(profiler, stream=prof_out_sio)
            s.sort_stats("cumulative")
            s.print_stats(0.1)
            prof_out_str = prof_out_sio.getvalue()
        else:
            prof_out_str = "profiler already running - no profile collected"

        # header row plus one row per process, serialised as a compact table
        attrs = ["pid", "name", "cpu_percent", "cpu_times", "status", "memory_info"]
        context = {
            "actual_profile_period": perf_counter_after - perf_counter_before,
            "request_time": elapsed,
            "processes": json.dumps(
                [
                    attrs,
                    [
                        [_process_item(p, a, process_times_before) for a in attrs]
                        for p in psutil.process_iter(attrs)
                    ],
                ]
            ),
            "profile": prof_out_str,
            # NOTE(review): key name looks truncated - "process_" holds this
            # worker's pid; confirm whether "process_pid" was intended
            "process_": getpid(),
        }
        worker.log.info(
            "post-request diagnostics for request of %(request_time)ss",
            context,
            extra=context,
        )