Initial test commit

Alexey Barabanov
2025-08-11 13:38:20 +03:00
parent d85ae1316a
commit 3e670d3722
9 changed files with 4459 additions and 0 deletions

AF_script_test_1.py (new file, 400 lines)

@@ -0,0 +1,400 @@
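# AF_script_test_1.py: reads a schedule from a Synology Office spreadsheet,
# builds nexrender render jobs for After Effects announcement packshots,
# submits them to a nexrender server, and polls until every job completes.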
import datetime
import logging
from pprint import pprint
from time import sleep

import pandas as pd
import requests
from synology_drive_api.drive import SynologyDrive
from transliterate import translit

# NAS_IP='walle.barabanov.tv'
# NAS_PORT='443'
# NAS_FILE='/mydrive/Drive/Anons.osheet'
NAS_USER = 'aescript'
NAS_PASS = '@5j15SduIhP7'
NAS_IP = 'edit.tvstart.ru'
NAS_PORT = '443'
NAS_FILE = '/team-folders/nexrender/Anons.osheet'

logger = logging.getLogger(__name__)
logging.basicConfig(filename='AF_script.log', level=logging.INFO,
                    format='%(asctime)s %(levelname)s %(message)s')
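# Log in to Synology Drive and download the schedule spreadsheet as a bytes blob.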
def load_osheet():
logger.info('Get data')
synd = SynologyDrive(NAS_USER, NAS_PASS, NAS_IP,NAS_PORT,https=True,dsm_version='7')
try:
        logger.info(synd.login())  # Confirm the session is established.
try:
logger.debug('Try to download sheet')
bio = synd.download_synology_office_file(NAS_FILE)
logger.debug(bio)
logger.info('Download Success')
return bio
        except Exception:
            logger.warning('Download failed')
    except Exception:
        logger.warning('Login error')
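# Parse the 'Start' sheet: rows still pending (STATE == False) with a complete
# DATA/TIME/SPORT/LEAGUE set are the jobs to render.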
def get_start(osheet):
logger.info('Read Start page')
try:
sheet = pd.read_excel(osheet, sheet_name='Start',header=1)
        sheet = sheet[sheet['STATE'] == False]  # Keep only rows not yet processed
        logger.debug('First check')
        logger.debug(sheet)
        logger.debug("Dropping rows missing 'DATA','TIME','SPORT' or 'LEAGUE'")
        sheet.dropna(subset=['DATA','TIME','SPORT','LEAGUE'], inplace=True)
logger.debug(sheet)
logger.info('Parsing OK')
return sheet
    except Exception:
        logger.warning('Error while reading excel sheet')
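# Parse the 'SPORT' sheet: maps each sport to its branding-pack video LINK.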
def get_packs(osheet):
logger.info('Read SPORT page')
try:
sheet = pd.read_excel(osheet, sheet_name='SPORT',header=0,index_col='SPORT')
logger.debug(sheet)
logger.info('Parsing OK')
return sheet[sheet.index.notna()]
    except Exception:
        logger.warning('Error while reading excel sheet')
        raise
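# Parse the 'TEAMS' sheet: a (TEAM, SPORT)-indexed table of team logo LINKs.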
def get_logos(osheet):
logger.info('Read TEAMS page')
try:
sheet = pd.read_excel(osheet, sheet_name='TEAMS',header=0,index_col=[0,1])
        logger.debug('First check')
        logger.debug(sheet)
        logger.debug("Dropping rows with no 'LINK'")
        sheet.dropna(subset=['LINK'], inplace=True)
logger.debug(sheet)
logger.info('Parsing OK')
return sheet
    except Exception:
        logger.warning('Error while reading excel sheet')
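# Look up the branding-pack LINK for a sport; returns '' if none is set.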
def get_sport_logo(sport,pack):
logger.info('Get '+sport+' pack')
try:
d=pack.loc[sport]['LINK']
logger.debug(d)
if pd.isna(d):
logger.warning(f'There is no LINK for sport "{sport}"')
return ''
return d
except Exception as inst:
logger.warning("Couldn't get "+sport+" pack")
logger.warning(inst)
return ''
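# Look up the logo LINK for a (team, sport) pair; returns '' if missing.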
def get_team_logo(team,sport,logos):
logger.info(f'Get {team}/{sport} logo')
try:
d=logos.loc[team,sport]['LINK']
logger.debug(d)
return d
except KeyError as inst:
logger.warning(f"There is no LINK for sport {team}/{sport}")
return ''
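# Build the output file name and the per-job data dict from one schedule row,
# then queue one job (or three, when TRIPPLE is set: dated, 'today', 'tomorrow').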
def make_name(ds,pack,logos):
logger.info('Start make name')
fn=''
data={}
empty_sport=pack.iloc[0].name
if isinstance(ds['DATA'],str):
fn+=f"{ds['DATA'][6:]}{ds['DATA'][3:5]}{ds['DATA'][0:2]}"
elif isinstance(ds['DATA'],datetime.date):
fn+=f"{ds['DATA'].year}{ds['DATA'].month:02}{ds['DATA'].day:02}"
    # If there is no branding pack (SPORT equals the sentinel first row)
if ds['SPORT']!=empty_sport:
fn+=f"_{ds['SPORT']}"
data['sport']=ds['SPORT']
data['pack']=unc2uri(get_sport_logo(ds['SPORT'],pack))
else:
data['sport']=''
data['pack']=''
fn+=f'_{ds["LEAGUE"]}'
    # If a team is missing
if pd.isna(ds['TEAM A']):
logger.info('No Team A present')
data['team_a']=''
data['team_a_logo']=''
else:
fn+=f"_{ds['TEAM A']}"
data['team_a']=ds['TEAM A']
data['team_a_logo']=unc2uri(get_team_logo(ds['TEAM A'],ds['SPORT'],logos))
if pd.isna(ds['TEAM B']):
logger.info('No Team B present')
data['team_b']=''
data['team_b_logo']=''
else:
fn+=f"_{ds['TEAM B']}"
data['team_b']=ds['TEAM B']
data['team_b_logo']=unc2uri(get_team_logo(ds['TEAM B'],ds['SPORT'],logos))
fn=translit(fn,reversed=True)
fn=fn.replace(' ','-')
fn=fn.replace("'",'')
data['outfile_name']=fn
data['league']=ds['LEAGUE']
if isinstance(ds['TIME'],str):
t=ds['TIME'].split(':')
# data['time']=':'.join(t[0:2])
data['time_h']= t[0]
data['time_m']= t[1]
elif isinstance(ds['TIME'],datetime.time):
data['time_h']= str(ds['TIME'].hour)
data['time_m']= str(ds['TIME'].minute)
if isinstance(ds['DATA'],str):
d=ds['DATA'].split('.')
d=f"{int(d[0])} {['','января','февраля','марта','апреля','мая','июня','июля','августа','сентября','октября','ноября','декабря'][int(d[1])]}"
elif isinstance(ds['DATA'],datetime.date):
d=f"{ds['DATA'].day} {['','января','февраля','марта','апреля','мая','июня','июля','августа','сентября','октября','ноября','декабря'][ds['DATA'].month]}"
data['data']=d
logger.debug(data)
logger.debug(fn)
logger.info("End make name")
watch_list=[]
watch_list.append(send_job(data))
    if pd.notna(ds['TRIPPLE']) and ds['TRIPPLE']:  # NaN is truthy, so guard explicitly; 'TRIPPLE' (sic) is the sheet's column name
data['data']='сегодня'
data['outfile_name']=fn+'_Today'
watch_list.append(send_job(data))
data['data']='завтра'
data['outfile_name']=fn+'_Tomorrow'
watch_list.append(send_job(data))
pprint(watch_list)
return list(filter(None,watch_list))
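# Assemble the nexrender job payload (template, post-render encode/copy actions,
# text/image/video assets) and POST it to the nexrender server.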
def send_job(data):
payload={}
payload["template"]={"src": "file:///c:/users/virtVmix-2/Downloads/PackShot_Sborka_eng.aepx",
"composition": "pack",
"outputModule": "Start_h264",
"outputExt": "mp4"}
payload['actions']={
"postrender": [
{
"module": "@nexrender/action-encode",
"preset": "mp4",
"output": "encoded.mp4"
},
{
"module": "@nexrender/action-copy",
"input": "encoded.mp4",
"output": f"//10.10.35.3/edit/Auto_Anons/{data['outfile_name']}.mp4"
}
]
}
payload['assets']=[]
    # Date text: the sheet date, or "сегодня"/"завтра" ("today"/"tomorrow")
    # Font size
if data['data'] == 'сегодня':
payload['assets'].append({
"layerName": "DATA",
"property": "Source Text.fontSize",
"type": "data",
"value": "95"
})
logger.info('For "'+data['data']+'" font set to 95')
    # Font size
elif data['data'] == 'завтра':
payload['assets'].append({
"layerName": "DATA",
"property": "Source Text.fontSize",
"type": "data",
"value": "109"
})
logger.info('For "'+data['data']+'" font set to 109')
payload['assets'].append({
"type": "data",
"layerName": "DATA",
"property": "Source Text",
"value": data['data']
})
    # Time
if len(data['time_h'])<2:
payload['assets'].append({
"layerName": "TIME_H",
"property": "transform.anchorPoint",
"type": "data",
"value": [37,0]
})
payload['assets'].append({
"layerName": "TIME_M",
"property": "transform.anchorPoint",
"type": "data",
"value": [37,0]
})
payload['assets'].append({
"layerName": "TIME",
"property": "transform.anchorPoint",
"type": "data",
"value": [37,0]
})
logger.info('Shifting the "Time" by 37 pixels')
payload['assets'].append({
"type": "data",
"layerName": "TIME_H",
"property": "Source Text",
"value": data['time_h']
})
payload['assets'].append({
"type": "data",
"layerName": "TIME_M",
"property": "Source Text",
"value": data['time_m']
})
    # League
payload['assets'].append({
"type": "data",
"layerName": "LEAGUE",
"property": "Source Text",
"value": data['league']
})
    # Font size
if len(data['league'])>16:
payload['assets'].append({
"layerName": "LEAGUE",
"property": "Source Text.fontSize",
"type": "data",
"value": "73"
})
logger.info('For "'+data['league']+'" font set to 73')
    # Sport
if data['sport']:
payload['assets'].append({
"type": "data",
"layerName": "SPORT",
"property": "Source Text",
"value": data['sport']
})
    # Team A
if data['team_a']:
payload['assets'].append({
"type": "data",
"layerName": "TEAM_A",
"property": "Source Text",
"value": data['team_a']
})
    # Team B
if data['team_b']:
payload['assets'].append({
"type": "data",
"layerName": "TEAM_B",
"property": "Source Text",
"value": data['team_b']
})
    # Team A logo
if data['team_a_logo']:
payload['assets'].append({
"src": data['team_a_logo'],
"type": "image",
"layerName": "TEAM_A_LOGO"
})
    # Team B logo
if data['team_b_logo']:
payload['assets'].append({
"src": data['team_b_logo'],
"type": "image",
"layerName": "TEAM_B_LOGO"
})
    # Top branding video
if data['pack']:
payload['assets'].append({
"src": data['pack'],
"type": "video",
"layerName": "TOP"
})
url='http://10.10.2.20:3000/api/v1/jobs'
r=requests.post(url,json=payload)
    if r.status_code == 200:
        res = r.json()
        uid = res['uid']
        return {'uid': uid, 'outname': data['outfile_name']}
    # Implicit None on failure; the caller filters these out of the watch list.
    logger.warning(f"Job submit failed: HTTP {r.status_code}")
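# Convert a UNC path to a file: URI, e.g.
# unc2uri(r'\\server\share\logo.png') -> 'file://server/share/logo.png'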
def unc2uri(unc):
    if unc[:2] == '\\\\':
        # Do the replace outside the f-string: backslashes are not allowed
        # inside f-string expressions before Python 3.12.
        uri = 'file:' + unc.replace('\\', '/')
    else:
        uri = unc
    return uri
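# Main flow: download the sheet, parse its pages, clear finished/errored jobs
# from the nexrender queue, submit new jobs, then poll until all are done.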
logger.info('Start!')
osheet=load_osheet()
start=get_start(osheet)
pack=get_packs(osheet)
logos=get_logos(osheet)
# Remove previous jobs that have finished or errored
r=requests.get('http://10.10.2.20:3000/api/v1/jobs')
if r.status_code==200:
jobs=r.json()
s=[{'uid':i['uid'],'state':i['state']} for i in jobs]
for job in s:
if job['state'] in ('finished', 'error'):
requests.delete(f"http://10.10.2.20:3000/api/v1/jobs/{job['uid']}")
watch_list=[]
for _, row in start.iterrows():
    watch_list += make_name(row, pack, logos)
logger.info(f"Queued {len(watch_list)} jobs")
while watch_list:
    sleep(60)
    for job in watch_list[:]:  # iterate over a copy; items are removed below
        r = requests.get(f"http://10.10.2.20:3000/api/v1/jobs/{job['uid']}")
        if r.status_code == 200 and r.json()['state'] in ('finished', 'error'):
            watch_list.remove(job)
            logger.info(f"{job}, {r.json()['state']}, {len(watch_list)} to go")
    print('.', end="")
logger.info('End!')
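# Scratch snippets kept from manual testing of the nexrender HTTP API: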
# with open('myjob.json') as myjob:
# headers={'content-type':'application/json'}
# print('start request')
# r=requests.post('http://10.10.2.20:3000/api/v1/jobs',
# headers=headers, data=myjob.read())
# print('end request')
# print(r.status_code)
#curl http://10.10.2.20:3000/api/v1/jobs >jobs.json
# import json
# with open('jobs.json') as f:
# jobs=json.load(f)
# s=[(i['uid'],i['state']) for i in jobs]
# pprint(s)