adds training scripts and evaluation files for topic classification

2023-08-15 14:19:08 +02:00
parent 90aa58239c
commit 7c6b618272
5 changed files with 769 additions and 83 deletions

train.py

@@ -5,15 +5,13 @@ Created on Sat Aug 12 12:25:18 2023
@author: michael
"""
from datasets import load_dataset
from transformers import Trainer
from transformers import AutoModelForSequenceClassification
#from datasets import load_dataset
#from transformers import Trainer
#from transformers import AutoModelForSequenceClassification
from transformers import AutoTokenizer
import torch
import os
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:512"
import numpy as np
from sklearn.model_selection import train_test_split # pip install scikit-learn
import pandas as pd
@@ -41,39 +39,54 @@ di = "data/IN/"
ud = "data/OUT/"
# Training CSV dataset
twtCSV = "SenatorsTweets-Training_WORKING-COPY-correct"
twtCSV = "SenatorsTweets-Training_WORKING-COPY-correct2"
twtCSVtrainCovClass = "SenatorsTweets-train-CovClassification"
twtCSVtrainFakeClass = "SenatorsTweets-train-FakeClassification"
# Name of new datafile generated
senCSVprep = "SenatorsTweets-Training_WORKING-COPY-prepared"
statsTrainingTopicClass = "statsTopicClassification-"
# don't change this one
twtCSVPath = wd + ud + twtCSV + ".csv"
twtCSVtrainCovClassPath = wd + ud + twtCSVtrainCovClass + ".csv" # may be unnecessary
twtCSVtrainFakeClassPath = wd + ud + twtCSVtrainFakeClass + ".csv" # may be unnecessary
twtCSVtrainCovClassPath = wd + ud + twtCSVtrainCovClass + ".csv"
twtCSVtrainFakeClassPath = wd + ud + twtCSVtrainFakeClass + ".csv"
twtCSVtrainCovClassPathTrain = wd + ud + twtCSVtrainCovClass + "TRAIN.csv" # may be unnecessary
twtCSVtrainFakeClassPathTrain = wd + ud + twtCSVtrainFakeClass + "TRAIN.csv" # may be unnecessary
statsTrainingTopicClassPath = wd + ud + statsTrainingTopicClass
twtCSVtrainCovClassPathTrain = wd + ud + twtCSVtrainCovClass + "TRAIN.csv"
twtCSVtrainFakeClassPathTrain = wd + ud + twtCSVtrainFakeClass + "TRAIN.csv"
twtTSVtrainCovClassPathTrain = wd + ud + "cov-train.tsv"
twtTSVtrainFakeClassPathTrain = wd + ud + "fake-train.tsv"
twtTSVtrainCovClassPathEval = wd + ud + "cov-eval.tsv" # may be unnecessary
twtTSVtrainFakeClassPathEval = wd + ud + "fake-eval.tsv" # may be unnecessary
twtTSVtrainCovClassPathEval = wd + ud + "cov-eval.tsv"
twtTSVtrainFakeClassPathEval = wd + ud + "fake-eval.tsv"
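# with ud = "data/OUT/" above, these resolve to wd + "data/OUT/cov-train.tsv",
# wd + "data/OUT/cov-eval.tsv", etc.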
seed = 12355
# Model paths
modCovClassPath = wd + "models/CovClass/"
modFakeClassPath = wd + "models/FakeClass/"
model_name = 'digitalepidemiologylab/covid-twitter-bert-v2' # accuracy: 69
#model_name = 'justinqbui/bertweet-covid19-base-uncased-pretraining-covid-vaccine-tweets' # accuracy: 48
#model_name = "cardiffnlp/tweet-topic-latest-multi"
model_name = "bvrau/covid-twitter-bert-v2-struth"
#model_name = "cardiffnlp/roberta-base-tweet-topic-single-all"
model_fake_name = 'bvrau/covid-twitter-bert-v2-struth'
# More models for fake detection:
# https://huggingface.co/justinqbui/bertweet-covid-vaccine-tweets-finetuned
model_name = 'digitalepidemiologylab/covid-twitter-bert-v2'
tokenizer = AutoTokenizer.from_pretrained(model_name)
max_length = 64 # max token sentence length
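# A minimal sketch (not part of the commit) of what this tokenizer setup
# produces for a single tweet; truncation=True is an assumption here, the
# tokenizer calls further below pad without truncating:
example_enc = tokenizer("Get your booster shot!", max_length=max_length,
                        padding="max_length", truncation=True)
print(len(example_enc["input_ids"]))  # 64 token ids, padded to max_length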
##
#%%
# Create training and testing dataset
dfTest = pd.read_csv(twtCSVPath, dtype=(object), delimiter=";")
dfTest = dfTest[:-800] # remove last 800 rows
dfTest = dfTest.iloc[:,:-3] # remove last 3 columns
dfTest['text'] = dfTest['rawContent'].apply(CleanTweets.preprocess_text)
#dfTest = dfTest[:-900] # remove last 900 rows
#dfTest = dfTest.iloc[:,:-3] # remove last 3 columns
dfTest['text'] = dfTest['rawContent'].apply(CleanTweets.preprocess_roberta)
dfTest.drop(columns=['rawContent'], inplace=True)
@@ -82,12 +95,13 @@ dfTest['tweet_proc_length'] = [len(text.split(' ')) for text in dfTest['text']]
dfTest['tweet_proc_length'].value_counts()
dfTest = dfTest[dfTest['tweet_proc_length']>3]
dfTest = dfTest.drop_duplicates(subset=['text'])
dfTest = dfTest.drop(columns=['date', 'Unnamed: 0'])
# Create datasets for each classification
dfCovClass = dfTest
dfCovClass = dfCovClass.drop(columns=['fake', 'date', 'Unnamed: 0']) # fake column not needed in covid topic classification data
dfFakeClass = dfTest
dfFakeClass = dfFakeClass.drop(columns=['topicCovid', 'date', 'Unnamed: 0']) # topicCovid column not needed in fake news classification data
dfCovClass = dfCovClass.drop(columns=['fake']) # fake column not needed in covid topic classification data
dfFakeClass = dfFakeClass[dfFakeClass['topicCovid']=='True'].drop(columns=['topicCovid']) # topicCovid column not needed in fake news classification data
#type_map = {'Covid tweet': 'covid tweets', 'Noncovid tweet': 'noncovid tweet'}
dfCovClass.rename(index = str, columns={'topicCovid': 'labels', 'tid': 'id'}, inplace = True)
@@ -97,10 +111,12 @@ dfCovClass.labels = dfCovClass.labels.replace({"True": 'Covid', "False": 'NonCov
dfFakeClass.rename(index = str, columns={'fake': 'labels', 'tid': 'id'}, inplace = True)
dfFakeClass.labels = dfFakeClass.labels.replace({"True": 'Fake', "False": 'True'})
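# note: in the fake-news task the raw value "False" means the tweet is
# truthful, hence the (confusing) replacement "False" -> 'True' (a true tweet)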
##
#%%
# Tokenize tweets
#dfCovClass['input_ids'] = dfCovClass['text'].apply(lambda x: tokenizer(x, max_length=max_length, padding="max_length",)['input_ids'])
#dfFakeClass['input_ids'] = dfFakeClass['text'].apply(lambda x: tokenizer(x, max_length=max_length, padding="max_length",)['input_ids'])
dfCovClass = dfCovClass[dfCovClass['labels'].notna()]
dfFakeClass = dfFakeClass[dfFakeClass['labels'].notna()]
dfCovClass['input_ids'] = dfCovClass['text'].apply(lambda x: tokenizer(x, max_length=max_length, padding="max_length",)['input_ids'])
dfFakeClass['input_ids'] = dfFakeClass['text'].apply(lambda x: tokenizer(x, max_length=max_length, padding="max_length",)['input_ids'])
def encode_labels(label):
if label == 'Covid':
@@ -115,45 +131,80 @@ def encode_labels(label):
dfCovClass['labels_encoded'] = dfCovClass['labels'].apply(encode_labels)
dfFakeClass['labels_encoded'] = dfFakeClass['labels'].apply(encode_labels)
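# The body of encode_labels is elided by the diff; a sketch (an assumption)
# consistent with the label names above and the "coded 0/1" prints below:
def _encode_labels_sketch(label):
    # covid topic task: 'NonCovid' -> 0, 'Covid' -> 1
    # fake news task:   'True'     -> 0, 'Fake'  -> 1
    return 1 if label in ('Covid', 'Fake') else 0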
# get n of classes
print("# of Non-Covid tweets (coded 0):")
print(dfCovClass.groupby('labels_encoded', group_keys=False)['id'].nunique())
# 62 non-covid tweets, so a balanced (50/50) training sample totals 124 tweets
#save dfs as csvs
dfCovClass = dfCovClass.drop(columns=['tweet_proc_length'])
dfCovClass[200:1000].reset_index(drop=True).to_csv(twtCSVtrainCovClassPathTrain, encoding='utf-8', sep=";")
dfCovClass[200:1000].reset_index(drop=True).to_csv(twtTSVtrainCovClassPathTrain, encoding='utf-8', sep="\t")
dfCovClass[0:199].reset_index(drop=True).to_csv(twtCSVtrainCovClassPath, encoding='utf-8', sep=";")
dfCovClass[0:199].reset_index(drop=True).to_csv(twtTSVtrainCovClassPathEval, encoding='utf-8', sep="\t")
dfFakeClass = dfFakeClass.drop(columns=['tweet_proc_length'])
dfFakeClass[200:1000].reset_index(drop=True).to_csv(twtCSVtrainFakeClassPathTrain, encoding='utf-8', sep=";")
dfFakeClass[200:1000].reset_index(drop=True).to_csv(twtTSVtrainFakeClassPathTrain, encoding='utf-8', sep="\t")
dfFakeClass[0:199].reset_index(drop=True).to_csv(twtCSVtrainFakeClassPath, encoding='utf-8', sep=";")
dfFakeClass[0:199].reset_index(drop=True).to_csv(twtTSVtrainFakeClassPathEval, encoding='utf-8', sep="\t")
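# note: the [0:199] and [200:1000] slices above cover rows 0-198 and
# 200-999, so row 199 lands in neither the eval nor the training file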
print("# of Fake-news tweets (coded 1):")
print(dfFakeClass.groupby('labels_encoded', group_keys=False)['id'].nunique())
##
# create a disproportionate sample: 50/50 of both classes
#dfCovClass.groupby('labels_encoded', group_keys=False)['id'].nunique()
#dfCovClass = dfCovClass.groupby('labels_encoded', group_keys=False).apply(lambda x: x.sample(164, random_state=seed))
# after a lot of tests, it seems that a sample in which non-fake-news tweets are overrepresented leads to better results.
# because of this, plus performance limitations and time constraints, group 1 (covid topic) will be overrepresented (twice as many), which still doesn't reflect the real proportions of ~10:1
'''dfCovClassa = dfCovClass.groupby('labels_encoded', group_keys=False).get_group(1).sample(frac=1, replace=True).reset_index()
dfCovClassb = dfCovClass.groupby('labels_encoded', group_keys=False).get_group(0).sample(frac=1, replace=True).reset_index()
dfCovClassab= pd.concat([dfCovClassa,dfCovClassb])
dfCovClassab.reset_index(inplace=True)
dfCovClass_train, dfCovClass_test = train_test_split(dfCovClassab, test_size=0.1, random_state=seed, stratify=dfCovClassab['labels_encoded'])
'''
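# A minimal sketch (not in the commit) of the 2:1 disproportionate sample
# described above, oversampling the covid class (1) to twice the size of
# the non-covid class (0):
n_noncovid = (dfCovClass['labels_encoded'] == 0).sum()
dfOversampled = pd.concat([
    dfCovClass[dfCovClass['labels_encoded'] == 0],
    dfCovClass[dfCovClass['labels_encoded'] == 1].sample(2 * n_noncovid,
                                                         replace=True,
                                                         random_state=seed),
]).reset_index(drop=True)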
# create training and validation samples
dfCovClass_train, dfCovClass_test = train_test_split(dfCovClass, test_size=0.1, random_state=seed, stratify=dfCovClass['labels_encoded'])
# reset index and drop unnecessary columns
dfCovClass_train.reset_index(drop=True, inplace=True)
dfCovClass_train.drop(inplace=True, columns=['tweet_proc_length'])
dfCovClass_train.groupby('labels_encoded', group_keys=False)['id'].nunique()
dfCovClass_test.reset_index(drop=True, inplace=True)
dfCovClass_test.drop(inplace=True, columns=['tweet_proc_length'])
dfCovClass_test.groupby('labels_encoded', group_keys=False)['id'].nunique()
# save dfs as csvs and tsvs, for training and validation
# covid classification datafiles
# rows 0-41 = noncovid, 42-81 covid, therefore:
#dfCovClass = dfCovClass.drop(columns=['tweet_proc_length'])
#dfCovClass.reset_index(inplace=True, drop=True)
#dfCovClass.loc[np.r_[0:31, 42:71], :].reset_index(drop=True).to_csv(twtCSVtrainCovClassPathTrain, encoding='utf-8', sep=";")
#dfCovClass.loc[np.r_[0:31, 42:72], :].reset_index(drop=True).to_csv(twtTSVtrainCovClassPathTrain, encoding='utf-8', sep="\t")
#dfCovClass.loc[np.r_[31:41, 72:81], :].reset_index(drop=True).to_csv(twtCSVtrainCovClassPath, encoding='utf-8', sep=";")
#dfCovClass.loc[np.r_[31:41, 72:81], :].reset_index(drop=True).to_csv(twtTSVtrainCovClassPathEval, encoding='utf-8', sep="\t")
# fake news classification datafiles
#dfFakeClass = dfFakeClass.drop(columns=['tweet_proc_length'])
#dfFakeClass[200:1000].reset_index(drop=True).to_csv(twtCSVtrainFakeClassPathTrain, encoding='utf-8', sep=";")
#dfFakeClass[200:1000].reset_index(drop=True).to_csv(twtTSVtrainFakeClassPathTrain, encoding='utf-8', sep="\t")
#dfFakeClass[0:199].reset_index(drop=True).to_csv(twtCSVtrainFakeClassPath, encoding='utf-8', sep=";")
#dfFakeClass[0:199].reset_index(drop=True).to_csv(twtTSVtrainFakeClassPathEval, encoding='utf-8', sep="\t")
#%%
# Prepare trainer
from transformers import TrainingArguments
#from transformers import TrainingArguments
training_args = TrainingArguments(
#training_args = TrainingArguments(
# report_to = 'wandb',
output_dir=wd+'results', # output directory
overwrite_output_dir = True,
num_train_epochs=3, # total number of training epochs
per_device_train_batch_size=8, # batch size per device during training
per_device_eval_batch_size=16, # batch size for evaluation
learning_rate=2e-5,
warmup_steps=1000, # number of warmup steps for learning rate scheduler
weight_decay=0.01, # strength of weight decay
logging_dir='./logs3', # directory for storing logs
logging_steps=1000,
evaluation_strategy="epoch",
save_strategy="epoch",
load_best_model_at_end=True
)
# output_dir=wd+'results', # output directory
# overwrite_output_dir = True,
# num_train_epochs=6, # total number of training epochs
# per_device_train_batch_size=8, # batch size per device during training
# per_device_eval_batch_size=16, # batch size for evaluation
# learning_rate=2e-5,
# warmup_steps=1000, # number of warmup steps for learning rate scheduler
# weight_decay=0.01, # strength of weight decay
# logging_dir='./logs3', # directory for storing logs
# logging_steps=1000,
# evaluation_strategy="epoch",
# save_strategy="epoch",
# load_best_model_at_end=True
#)
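# A minimal sketch (an assumption; the script below trains manually instead)
# of how these TrainingArguments would plug into the Trainer API:
# from transformers import Trainer
# trainer = Trainer(model=model, args=training_args,
#                   train_dataset=train_dataset, eval_dataset=eval_dataset)
# trainer.train()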
tokenizer = AutoTokenizer.from_pretrained(model_name)
from transformers import BertForSequenceClassification, AdamW, BertConfig
from torch.utils.data import TensorDataset, random_split
from transformers import BertForSequenceClassification, AdamW#, BertConfig
#from torch.utils.data import TensorDataset, random_split
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
"""
@@ -162,7 +213,7 @@ train_dataset = train_dataset['train']
eval_dataset = load_dataset('csv', data_files={'test': twtCSVtrainCovClassPath}, encoding = "utf-8")
eval_dataset = eval_dataset['test']
"""
batch_size = 16
batch_size = 1
from torch.utils.data import Dataset
@@ -191,17 +242,14 @@ class PandasDataset(Dataset):
}
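# The full PandasDataset class is elided by the diff; a minimal sketch
# (an assumption) of a dataset along these lines:
class _PandasDatasetSketch(Dataset):
    def __init__(self, df, tokenizer, max_length):
        self.df = df.reset_index(drop=True)
        self.tokenizer = tokenizer
        self.max_length = max_length
    def __len__(self):
        return len(self.df)
    def __getitem__(self, idx):
        row = self.df.iloc[idx]
        enc = self.tokenizer(row['text'], max_length=self.max_length,
                             padding='max_length', truncation=True,
                             return_tensors='pt')
        return {
            'input_ids': enc['input_ids'].squeeze(0),
            'attention_mask': enc['attention_mask'].squeeze(0),
            'labels': torch.tensor(row['labels_encoded']),
        }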
df = pd.read_csv(twtCSVtrainCovClassPathTrain, delimiter=";")
train_dataset = PandasDataset(df, tokenizer, max_length)
train_dataset = PandasDataset(dfCovClass_train, tokenizer, max_length)
train_dataloader = DataLoader(
train_dataset,
sampler=RandomSampler(train_dataset),
batch_size=batch_size
)
df = pd.read_csv(twtCSVtrainCovClassPath, delimiter=";")
eval_dataset = PandasDataset(df, tokenizer, max_length)
eval_dataset = PandasDataset(dfCovClass_test, tokenizer, max_length)
validation_dataloader = DataLoader(
eval_dataset,
sampler=SequentialSampler(eval_dataset),
@@ -215,7 +263,7 @@ for idx, batch in enumerate(train_dataloader):
break
model = BertForSequenceClassification.from_pretrained(
"digitalepidemiologylab/covid-twitter-bert-v2", # Use the 12-layer BERT model, with an uncased vocab.
model_name,
num_labels = 2, # The number of output labels--2 for binary classification.
# You can increase this for multi-class tasks.
output_attentions = False, # Whether the model returns attentions weights.
@@ -240,9 +288,8 @@ optimizer = AdamW(model.parameters(),
from transformers import get_linear_schedule_with_warmup
# Number of training epochs. The BERT authors recommend between 2 and 4.
# We chose to run for 4, but we'll see later that this may be over-fitting the
# training data.
epochs = 4
# We chose to run for 6
epochs = 6
# Total number of training steps is [number of batches] x [number of epochs].
# (Note that this is not the same as the number of training samples).
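# (elided by the diff) presumably the standard computation from run_glue.py:
# total_steps = len(train_dataloader) * epochs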
@@ -253,10 +300,6 @@ scheduler = get_linear_schedule_with_warmup(optimizer,
num_warmup_steps = 0, # Default value in run_glue.py
num_training_steps = total_steps)
import numpy as np
# Function to calculate the accuracy of our predictions vs labels
def flat_accuracy(preds, labels):
pred_flat = np.argmax(preds, axis=1).flatten()
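# The rest of flat_accuracy is elided by the diff; in the run_glue-style
# tutorial this script follows, it continues (sketch):
#     labels_flat = labels.flatten()
#     return np.sum(pred_flat == labels_flat) / len(labels_flat)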
@@ -277,7 +320,6 @@ def format_time(elapsed):
return str(datetime.timedelta(seconds=elapsed_rounded))
import random
import numpy as np
# This training code is based on the `run_glue.py` script here:
# https://github.com/huggingface/transformers/blob/5bfcd0485ece086ebcbed2d008813037968a9e58/examples/run_glue.py#L128
@@ -307,6 +349,8 @@ np.random.seed(seed_val)
torch.manual_seed(seed_val)
torch.cuda.manual_seed_all(seed_val)
#%%
# Start training
# We'll store a number of quantities such as training and validation loss,
# validation accuracy, and timings.
training_stats = []
@@ -324,14 +368,14 @@ for epoch_i in range(0, epochs):
print("")
print('======== Epoch {:} / {:} ========'.format(epoch_i + 1, epochs))
print('{:>5,} batches per epoch will be processed.'.format(len(train_dataloader)))
print('Training...')
# Measure how long the training epoch takes.
t0 = time.time()
model.to(device)
# Reset the total loss for this epoch.
total_train_loss = 0
# Put the model into training mode. Don't be misled--the call to
# `train` just changes the *mode*, it doesn't *perform* the training.
# `dropout` and `batchnorm` layers behave differently during training
@@ -341,8 +385,8 @@ for epoch_i in range(0, epochs):
# For each batch of training data...
for step, batch in enumerate(train_dataloader):
# Progress update every 40 batches.
if step % 40 == 0 and not step == 0:
# Progress update every 10 batches.
if step % 10 == 0 and not step == 0:
# Calculate elapsed time in minutes.
elapsed = format_time(time.time() - t0)
@@ -527,8 +571,12 @@ for p in params[-4:]:
import os
# Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
from datetime import datetime as dt
output_dir = wd + 'model_save/'
fTimeFormat = "%Y-%m-%d_%H-%M-%S"
now = dt.now().strftime(fTimeFormat)
output_dir = modCovClassPath + now + "/"
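# e.g. output_dir == wd + "models/CovClass/2023-08-15_14-19-08/"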
# Create output directory if needed
if not os.path.exists(output_dir):
@@ -548,16 +596,18 @@ tokenizer.save_pretrained(output_dir)
import pandas as pd
# Display floats with two decimal places.
pd.set_option('precision', 2)
pd.set_option('display.precision', 2)
# Create a DataFrame from our training statistics.
df_stats = pd.DataFrame(data=training_stats)
# Use the 'epoch' as the row index.
# Good practice: save your training arguments together with the trained model
df_stats = df_stats.set_index('epoch')
# A hack to force the column headers to wrap.
#df = df.style.set_table_styles([dict(selector="th",props=[('max-width', '70px')])])
# Display the table.
df_stats
df_stats.to_csv(output_dir + now + ".csv")
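# A quick round-trip check (a sketch, not in the commit): reload the saved
# stats with 'epoch' restored as the index
df_prev = pd.read_csv(output_dir + now + ".csv", index_col='epoch')
print(df_prev)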