edx / configuration · Commits · aa646ee8

Commit aa646ee8 authored Jun 13, 2018 by Kevin Falcone
Loop over all redis queues
This keeps us under the AWS API limit of 20 metrics per request. OPS-3196
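For context on the limit the commit message refers to: CloudWatch's PutMetricData call accepts only a bounded number of MetricDatum entries per request (20 at the time of this change), so publishing one datum per Celery queue has to be batched. Below is a minimal sketch of that batching idea, assuming a boto3 CloudWatch client; the metric name, dimension name, namespace, and queue_lengths data are illustrative, not values taken from this script.

import boto3

# Illustrative data: queue name -> current queue length (hypothetical values).
queue_lengths = {"celery": 12, "lms.high": 3, "cms.default": 0}

cloudwatch = boto3.client("cloudwatch")
metric_data = [
    {
        "MetricName": "queue_length",                      # assumed metric name
        "Dimensions": [{"Name": "queue", "Value": name}],  # assumed dimension name
        "Value": length,
    }
    for name, length in queue_lengths.items()
]

# Publish in chunks of at most 20 entries so each PutMetricData request
# stays under the per-request metric limit.
MAX_METRICS_PER_REQUEST = 20
for start in range(0, len(metric_data), MAX_METRICS_PER_REQUEST):
    cloudwatch.put_metric_data(
        Namespace="celery",  # assumed namespace
        MetricData=metric_data[start:start + MAX_METRICS_PER_REQUEST],
    )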
Parent: 566f79d3

Showing 1 changed file with 56 additions and 56 deletions.

util/jenkins/check-celery-queues.py  (+56 -56)
@@ -3,6 +3,7 @@ import click
 import boto3
 import botocore
 import backoff
+from itertools import zip_longest
 
 max_tries = 5
 
@@ -105,62 +106,61 @@ def check_queues(host, port, environment, deploy, max_metrics, threshold,
         set(redis_queues).difference(existing_queues)
     )
 
-    if len(all_queues) > max_metrics:
-        # TODO: Use proper logging framework
-        print("Warning! Too many metrics, refusing to publish more than {}"
-              .format(max_metrics))
-
-    # Filter redis_queues out of all_queues and then take the max_metrics
-    # portion of that.
-    queues = [q for q in all_queues if q in redis_queues]
-    queues = queues[:max_metrics]
-
-    metric_data = []
-    for queue in queues:
-        metric_data.append({
-            'MetricName': metric_name,
-            'Dimensions': [{
-                "Name": dimension,
-                "Value": queue
-            }],
-            'Value': redis_client.llen(queue)
-        })
-
-    if len(metric_data) > 0:
-        cloudwatch.put_metric_data(Namespace=namespace, MetricData=metric_data)
-
-    for queue in queues:
-        dimensions = [{'Name': dimension,
-                       'Value': queue}]
-        queue_threshold = threshold
-        if queue in thresholds:
-            queue_threshold = thresholds[queue]
-        # Period is in seconds
-        period = 60
-        evaluation_periods = 15
-        comparison_operator = "GreaterThanThreshold"
-        treat_missing_data = "notBreaching"
-        statistic = "Maximum"
-        actions = [sns_arn]
-        alarm_name = "{}-{} {} queue length over threshold".format(environment,
-                                                                   deploy,
-                                                                   queue)
-
-        print('Creating or updating alarm "{}"'.format(alarm_name))
-        cloudwatch.put_metric_alarm(AlarmName=alarm_name,
-                                    AlarmDescription=alarm_name,
-                                    Namespace=namespace,
-                                    MetricName=metric_name,
-                                    Dimensions=dimensions,
-                                    Period=period,
-                                    EvaluationPeriods=evaluation_periods,
-                                    TreatMissingData=treat_missing_data,
-                                    Threshold=queue_threshold,
-                                    ComparisonOperator=comparison_operator,
-                                    Statistic=statistic,
-                                    InsufficientDataActions=actions,
-                                    OKActions=actions,
-                                    AlarmActions=actions)
+    for queues in grouper(all_queues, max_metrics):
+        queues = [q for q in queues if q is not None]
+        metric_data = []
+        for queue in queues:
+            metric_data.append({
+                'MetricName': metric_name,
+                'Dimensions': [{
+                    "Name": dimension,
+                    "Value": queue
+                }],
+                'Value': redis_client.llen(queue)
+            })
+
+        if len(metric_data) > 0:
+            cloudwatch.put_metric_data(Namespace=namespace, MetricData=metric_data)
+
+        for queue in queues:
+            dimensions = [{'Name': dimension,
+                           'Value': queue}]
+            queue_threshold = threshold
+            if queue in thresholds:
+                queue_threshold = thresholds[queue]
+            # Period is in seconds
+            period = 60
+            evaluation_periods = 15
+            comparison_operator = "GreaterThanThreshold"
+            treat_missing_data = "notBreaching"
+            statistic = "Maximum"
+            actions = [sns_arn]
+            alarm_name = "{}-{} {} queue length over threshold".format(environment,
+                                                                       deploy,
+                                                                       queue)
+
+            print('Creating or updating alarm "{}"'.format(alarm_name))
+            cloudwatch.put_metric_alarm(AlarmName=alarm_name,
+                                        AlarmDescription=alarm_name,
+                                        Namespace=namespace,
+                                        MetricName=metric_name,
+                                        Dimensions=dimensions,
+                                        Period=period,
+                                        EvaluationPeriods=evaluation_periods,
+                                        TreatMissingData=treat_missing_data,
+                                        Threshold=queue_threshold,
+                                        ComparisonOperator=comparison_operator,
+                                        Statistic=statistic,
+                                        InsufficientDataActions=actions,
+                                        OKActions=actions,
+                                        AlarmActions=actions)
+
+
+# Stolen right from the itertools recipes
+# https://docs.python.org/3/library/itertools.html#itertools-recipes
+def grouper(iterable, n, fillvalue=None):
+    "Collect data into fixed-length chunks or blocks"
+    # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx"
+    args = [iter(iterable)] * n
+    return zip_longest(*args, fillvalue=fillvalue)
 
 
 if __name__ == '__main__':
     check_queues()
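As a quick, standalone illustration of what the new grouper-based loop does (not part of the commit): grouper pads its final chunk with the fill value, which is why the loop body filters out None before publishing. The queue names below are hypothetical.

from itertools import zip_longest

def grouper(iterable, n, fillvalue=None):
    "Collect data into fixed-length chunks or blocks"
    args = [iter(iterable)] * n
    return zip_longest(*args, fillvalue=fillvalue)

# 45 hypothetical queue names split into groups of at most 20.
queues = ["queue-{}".format(i) for i in range(45)]
for chunk in grouper(queues, 20):
    # The last chunk is padded with None up to length 20; strip the padding
    # before using it, just as the updated check_queues() loop does.
    chunk = [q for q in chunk if q is not None]
    print(len(chunk))  # prints 20, 20, 5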