Unverified commit b71d6d31 by M. Rehan, committed by GitHub

Merge pull request #81 from edx/yro/yt_callback_bugfix

Yro/yt callback bugfix
parents d6c03c48 728ffc85
@@ -7,6 +7,7 @@ import django
from control_env import *
from dependencies.shotgun_api3 import Shotgun
from dependencies.shotgun_api3.lib.xmlrpclib import ProtocolError
from VEDA.utils import get_config
"""
@@ -101,26 +102,31 @@ class VedaEncode(object):
continue
def check_review_approved(self):
if self.sg_script_key is None:
return True
"""
** Mediateam only **
Check in with SG to see if this video
is authorized to go to final publishing
"""
# TODO: Move to independent API Method
if self.sg_script_key is None:
return True
video_object = Video.objects.filter(
edx_id=self.veda_id
).latest()
if video_object.inst_class.sg_projID is None:
return False
sg = Shotgun(
self.sg_server_path,
self.sg_script_name,
self.sg_script_key
)
try:
sg = Shotgun(
self.sg_server_path,
self.sg_script_name,
self.sg_script_key
)
except ProtocolError:
# Periodic API Error
return False
fields = ['project', 'entity', 'sg_status_list']
filters = [
@@ -130,6 +136,8 @@ class VedaEncode(object):
"id": video_object.inst_class.sg_projID
}],
]
# TODO: Improve API query
tasks = sg.find("Task", filters, fields)
for t in tasks:
if t['entity']['name'] == self.veda_id.split('-')[-1]:
@@ -137,11 +145,3 @@ class VedaEncode(object):
return True
return False
def main():
pass
if __name__ == '__main__':
sys.exit(main())
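
The hunk above wraps the Shotgun client construction in a try/except so that a transient xmlrpclib ProtocolError makes check_review_approved report "not approved" instead of crashing the encode run. A minimal sketch of how that branch could be exercised with mock; the patch target, the fixture, and the ProtocolError arguments are assumptions for illustration, not taken from this diff:

# Sketch only, not part of the diff. Assumes a pytest fixture `encoder`
# that builds a VedaEncode with sg_script_key set and a matching Video
# record (with an SG project id on its course) already in the test database.
import mock
from dependencies.shotgun_api3.lib.xmlrpclib import ProtocolError

def test_review_check_survives_protocol_error(encoder):
    fake_error = ProtocolError('sg.example.com', 503, 'Service Unavailable', {})
    # Patch target is an assumption about where veda_encode lives.
    with mock.patch('control.veda_encode.Shotgun', side_effect=fake_error):
        # The new except clause should swallow the error and return False.
        assert encoder.check_review_approved() is False
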
@@ -238,6 +238,11 @@ class VALAPICall():
Determine VAL profile data, from return/encode submix
"""
# Defend against old/deprecated encodes
try:
self.auth_dict['val_profile_dict'][self.encode_profile]
except KeyError:
return
if self.endpoint_url is not None:
for p in self.auth_dict['val_profile_dict'][self.encode_profile]:
@@ -264,6 +269,10 @@ class VALAPICall():
if final.encode_profile.product_spec == 'review':
pass
else:
try:
self.auth_dict['val_profile_dict'][final.encode_profile.product_spec]
except KeyError:
return
for p in self.auth_dict['val_profile_dict'][final.encode_profile.product_spec]:
test_list.append(dict(
url=str(final.encode_url),
@@ -277,19 +286,19 @@ class VALAPICall():
self.encode_data.append(t)
if len(val_api_return) == 0:
return None
return
"""
All URL Records Deleted (for some reason)
"""
if len(self.encode_data) == 0:
return None
return
for i in val_api_return['encoded_videos']:
if i['profile'] not in [g['profile'] for g in self.encode_data]:
self.encode_data.append(i)
return None
return
def send_404(self):
"""
......
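
Both val.py hunks apply the same guard: if an encode profile is missing from val_profile_dict (an old or deprecated encode), the method returns early instead of raising KeyError. A minimal sketch of that lookup as a small helper; the helper name, the profile names, and the config shape are illustrative assumptions:

# Sketch only, not part of the diff.
def get_profile_spec(auth_dict, encode_profile):
    """Return the VAL profile list for an encode, or None if it is deprecated."""
    try:
        return auth_dict['val_profile_dict'][encode_profile]
    except KeyError:
        return None

# Illustrative config shape and profile names:
auth_dict = {'val_profile_dict': {'desktop_mp4': ['desktop_mp4'], 'hls': ['hls']}}
print(get_profile_spec(auth_dict, 'desktop_mp4'))   # ['desktop_mp4']
print(get_profile_spec(auth_dict, 'flash_legacy'))  # None: caller returns early, as in the diff

A dict .get() would read slightly tighter here, but the try/except mirrors what the hunks actually add.
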
@@ -63,4 +63,4 @@ val_profile_dict:
heal_start: 1
heal_end: 144
global_timeout: 40
global_timeout: 60
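
The instance-config hunk raises global_timeout from 40 to 60. A minimal sketch of reading that value, assuming get_config (imported in the first hunk) returns the parsed YAML as a plain dict; the dict-style access and the fallback default are assumptions:

# Sketch only, not part of the diff.
from VEDA.utils import get_config

auth_dict = get_config()
global_timeout = int(auth_dict.get('global_timeout', 60))  # assumed dict-style config access
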
@@ -11,7 +11,7 @@ import sys
import xml.etree.ElementTree as ET
from datetime import timedelta
from os.path import expanduser
from paramiko.ssh_exception import AuthenticationException
from paramiko.ssh_exception import AuthenticationException, SSHException
import django
import pysftp
@@ -91,6 +91,8 @@ def xml_downloader(course):
crawl_sftp(d=d, s1=s1)
except AuthenticationException:
LOGGER.info("{inst}{clss} : Authentication Failed".format(inst=course.institution, clss=course.edx_classid))
except SSHException:
LOGGER.info("{inst}{clss} : Authentication Failed".format(inst=course.institution, clss=course.edx_classid))
def crawl_sftp(d, s1):
@@ -103,43 +105,47 @@ def crawl_sftp(d, s1):
"""
dirtime = datetime.datetime.fromtimestamp(d.st_mtime)
if dirtime < datetime.datetime.now() - timedelta(days=YOUTUBE_LOOKBACK_DAYS):
return None
return
if d.filename == "files_to_be_removed.txt":
return None
return
if d.filename == 'FAILED':
return None
return
try:
s1.cwd(d.filename)
except:
return None
for f in s1.listdir_attr():
filetime = datetime.datetime.fromtimestamp(f.st_mtime)
if not filetime > datetime.datetime.now() - timedelta(days=YOUTUBE_LOOKBACK_DAYS):
continue
if fnmatch.fnmatch(f.filename, '*.xml') or fnmatch.fnmatch(f.filename, '*.csv'):
# Determine If there are extant downloaded status files for this same ID,
# If yes, increment filename
x = 0
while True:
"""
Just in case something runs out
"""
if x > 20:
break
file_to_find = f.filename.split('.')[0] + \
str(x) + \
'.' + \
f.filename.split('.')[1]
if os.path.exists(os.path.join(workdir, file_to_find)):
x += 1
else:
break
print "%s : %s" % (f.filename, file_to_find)
s1.get(
f.filename,
os.path.join(workdir, file_to_find)
)
return
try:
for f in s1.listdir_attr():
filetime = datetime.datetime.fromtimestamp(f.st_mtime)
if not filetime > datetime.datetime.now() - timedelta(days=YOUTUBE_LOOKBACK_DAYS):
continue
if fnmatch.fnmatch(f.filename, '*.xml') or fnmatch.fnmatch(f.filename, '*.csv'):
# Determine If there are extant downloaded status files for this same ID,
# If yes, increment filename
x = 0
while True:
"""
Just in case something runs out
"""
if x > 20:
break
file_to_find = f.filename.split('.')[0] + \
str(x) + \
'.' + \
f.filename.split('.')[1]
if os.path.exists(os.path.join(workdir, file_to_find)):
x += 1
else:
break
print "%s : %s" % (f.filename, file_to_find)
s1.get(
f.filename,
os.path.join(workdir, file_to_find)
)
except IOError:
return
except SSHException:
return
s1.cwd('..')
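
Inside the new try block, each recent .xml/.csv status file is downloaded under a name that will not collide with files already sitting in workdir, by appending an incrementing digit and giving up after 20 attempts. A sketch of that naming step as a standalone helper; the helper name and the use of os.path.splitext are illustrative, not from the diff:

# Sketch only, not part of the diff.
import os

def next_available_name(workdir, filename, max_tries=20):
    """Insert a counter before the extension and return the first
    candidate that does not already exist in workdir."""
    stem, ext = os.path.splitext(filename)
    for x in range(max_tries + 1):
        candidate = '{stem}{x}{ext}'.format(stem=stem, x=x, ext=ext)
        if not os.path.exists(os.path.join(workdir, candidate)):
            return candidate
    # Mirror the loop's "just in case" cap: fall back to the last candidate.
    return candidate
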
@@ -218,6 +224,7 @@ def urlpatch(upload_data):
test_id = Video.objects.filter(edx_id=upload_data['edx_id']).latest()
except:
upload_data['status'] = 'Failure'
return
if upload_data['status'] == 'Success':
url_query = URL.objects.filter(
......
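
The final hunk adds a return after marking the upload as a failure, so urlpatch no longer falls through to the Success branch without a valid test_id. A minimal sketch of the same guard with the exception narrowed; wrapping it in a helper and the helper name are assumptions, and Video is the model from the surrounding module:

# Sketch only, not part of the diff. QuerySet.latest() raises the model's
# DoesNotExist when no row matches the edx_id.
def lookup_video(upload_data):
    try:
        return Video.objects.filter(edx_id=upload_data['edx_id']).latest()
    except Video.DoesNotExist:
        upload_data['status'] = 'Failure'
        return None

The bare except in the diff also swallows any other database error; narrowing it is a separate choice from the fall-through fix itself.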