5 Commits

SHA1  Message  Date
90aa58239c adds generation of model-training dataset 2023-08-14 15:37:30 +02:00
1beff96ae9 adds model training code 2023-08-14 15:37:05 +02:00
881d3d6d6d adds tweet-text-cleaning functions 2023-08-14 15:36:46 +02:00
5a63c478e9 adds dataset profiler 2023-08-08 15:32:12 +02:00
ed61d52182 adds files to gitignore 2023-08-08 00:07:42 +02:00
7 changed files with 690 additions and 7 deletions

.vscode/.gitignore (new file, 1 line)

@@ -0,0 +1 @@
/settings.json

(modified Python script: training-dataset generation added)

@@ -11,6 +11,8 @@ import pandas as pd
import numpy as np
from funs.ClearDupes import deDupe

# Seed for training dataset generation
seed = 86431891

###################
# Setup directories
@@ -34,11 +36,13 @@ senDataset = "senators-raw.csv"
# Name of new datafile generated
senCSVc = "SenatorsTweets-Final"
senCSVcCov = "SenatorsTweets-OnlyCov"
senCSVcTrain = "SenatorsTweets-Training"

# don't change this one
senCSVPath = wd + ud + senCSV
senCSVcPath = wd + ud + senCSVc + ".csv"
senCSVcCovPath = wd + ud + senCSVcCov + ".csv"
senCSVcTrainPath = wd + ud + senCSVcTrain + ".csv"
senSAVcPath = wd + ud + senCSV + ".sav"
senDTAcPath = wd + ud + senCSV + ".dta"
senDatasetPath = wd + di + senDataset
@@ -188,7 +192,6 @@ dfCov = dfAll[dfAll['contains_counterKeyword']==False]
dfCov = dfCov[dfCov['contains_keyword']==True]
dfCov = dfCov.drop(columns=['contains_counterKeyword', 'counterKeywords'])

#%%
# create column with tweet length
@@ -211,3 +214,14 @@ dfCov.to_csv(senCSVcCovPath, encoding='utf-8', index_label = 'id')
# =========================

# %%
# Create training dataset: reproducible random sample of 1,800 Covid tweets
np.random.seed(seed)
dfTrain = dfCov.loc[np.random.choice(dfCov.index, 1800, replace=False)]
dfTrain = dfTrain[['tid', 'date', 'rawContent']]
dfTrain['topicCovid'] = True
dfTrain['fake'] = False
dfTrain.to_csv(senCSVcTrainPath, encoding='utf-8')
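A note on the sampling step: pandas can draw the same reproducible sample in a single call. A minimal sketch (not part of this commit), assuming dfCov and seed as defined above:

# Equivalent sampling sketch using DataFrame.sample (hypothetical alternative):
dfTrain = dfCov.sample(n=1800, random_state=seed)[['tid', 'date', 'rawContent']]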

data/OUT/.gitignore (new file, 8 lines)

@@ -0,0 +1,8 @@
/ALL-SENATORS-TWEETS.csv
/Pretest-Prep.csv
/Pretest-Results.csv
/Pretest-SENATORS-TWEETS.csv
/SenatorsTweets-Final.csv
/SenatorsTweets-OnlyCov.csv
/Tweets-Classified-Prep.csv
/Tweets-Stub.csv

data/OUT/graphs/.gitignore (new file, 3 lines)

@@ -0,0 +1,3 @@
/Timeline.png
/Wordcloud-All.png
/Wordcloud-Cov.png

(modified Python module: tweet-text-cleaning functions)

@@ -5,7 +5,6 @@ def remove_URL(text):
    url = re.compile(r'https?://\S+|www\.\S+')
    return url.sub(r'', text)

def remove_emoji(text):
    emoji_pattern = re.compile(
        '['
@@ -19,21 +18,61 @@ def remove_emoji(text):
        flags=re.UNICODE)
    return emoji_pattern.sub(r'', text)

def remove_html(text):
    html = re.compile(r'<.*?>|&([a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});')
    return re.sub(html, '', text)

def remove_punct(text):
    table = str.maketrans('', '', string.punctuation)
    return text.translate(table)

def remove_nonascii(text):
    return re.sub(r'[^\x00-\x7F]+', '', text)

def remove_spec(text):
    text = re.sub(r'&amp;?', r'and', text)
    text = re.sub(r'&lt;', r'<', text)
    return re.sub(r'&gt;', r'>', text)

def remove_spaces(text):  # also removes newline characters and lowercases
    text = re.sub(r'&lt;', r'<', text)
    text = " ".join(text.splitlines())  # remove newline characters
    text = text.lower()
    text = text.strip()
    return re.sub(r'\s{2,}', ' ', text)

def remove_retw(text):
    text = re.sub(r'(RT|rt)[ ]*@[ ]*[\S]+', '', text)
    return re.sub(r'@[\S]+', '', text)

def preprocess_text(text):  # supersedes the former clean_all()
    text = remove_URL(text)
    text = remove_emoji(text)
    text = remove_html(text)
    text = remove_punct(text)
    text = remove_nonascii(text)
    text = remove_spec(text)
    text = remove_spaces(text)
    text = remove_retw(text)
    return text

def preprocess_text_series(series):
    series = series.apply(remove_URL)
    series = series.apply(remove_emoji)
    series = series.apply(remove_html)
    series = series.apply(remove_punct)
    series = series.apply(remove_nonascii)
    series = series.apply(remove_spec)
    series = series.apply(remove_spaces)
    series = series.apply(remove_retw)
    return series

# Check all functions:
input_text = """
Check out this amazing website: https://www.example.com! 😃
<html>This is an HTML tag.</html>
RT @user123: Just received a package from @companyXYZ. It's awesome! 📦
This is a test text with lots of punctuations!!! Can't wait to see more...
"""
processed_text = preprocess_text(input_text)
# print(processed_text)
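For reference, a minimal usage sketch of the new series helper (not part of this commit), assuming the module is imported alongside pandas; the 'rawContent' column name mirrors the one used elsewhere in the repository:

# Hypothetical usage of preprocess_text_series on a tweet column:
import pandas as pd
tweets = pd.DataFrame({'rawContent': ["RT @sen_example: Read https://example.com &amp; stay safe!!"]})
tweets['text'] = preprocess_text_series(tweets['rawContent'])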

profiler.py (new file, 55 lines)

@@ -0,0 +1,55 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 8 14:49:02 2023
@author: michael
"""
import pandas as pd
import pandas_profiling as pp
import numpy
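# Compatibility sketch (assumption, not part of the committed script): pandas-profiling
# has since been renamed to ydata-profiling; a fallback import would keep this working:
# try:
#     import ydata_profiling as pp
# except ImportError:
#     import pandas_profiling as pp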
###################
# Setup directories
# WD Michael
wd = "/home/michael/Documents/PS/Data/collectTweets/"
# WD Server
# wd = '/home/yunohost.multimedia/polsoc/Politics & Society/TweetCollection/'
# datafile input directory
di = "data/IN/"
# Tweet-datafile output directory
ud = "data/OUT/"
# Name of file that all senator data will be written to
senCSV = "ALL-SENATORS-TWEETS.csv"
# Name of file that all senator data will be written to
senDataset = "senators-raw.csv"
# Name of new datafile generated
senCSVc = "SenatorsTweets-Final"
senCSVcCov = "SenatorsTweets-OnlyCov"
# don't change this one
senCSVPath = wd + ud + senCSV
senCSVcPath = wd + ud + senCSVc + ".csv"
senCSVcCovPath = wd + ud + senCSVcCov + ".csv"
senSAVcPath = wd + ud + senCSV + ".sav"
senDTAcPath = wd + ud + senCSV + ".dta"
senDatasetPath = wd + di + senDataset
# forming dataframe and printing
df = pd.read_csv(senCSVPath, dtype=(object))
# forming ProfileReport and save
# as output.html file
profileAll = pp.ProfileReport(df, minimal=True)
profileAll.to_file("data/OUT/profiles/AllTweets.html")
df = pd.read_csv(senCSVcCovPath, dtype=(object))
profileAll = pp.ProfileReport(df, minimal=True)
profileAll.to_file("data/OUT/profiles/CovTweets.html")

train.py (new file, 563 lines)

@@ -0,0 +1,563 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 12 12:25:18 2023
@author: michael
"""
from datasets import load_dataset
from transformers import Trainer
from transformers import AutoModelForSequenceClassification
from transformers import AutoTokenizer
import torch
import os
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:512"
import pandas as pd
## Follow these guides:
# best one https://mccormickml.com/2019/07/22/BERT-fine-tuning/
# https://xiangyutang2.github.io/tweet-classification/
# https://medium.com/mlearning-ai/fine-tuning-bert-for-tweets-classification-ft-hugging-face-8afebadd5dbf
###################
# Setup directories
# WD Michael
wd = "/home/michael/Documents/PS/Data/collectTweets/"
# WD Server
# wd = '/home/yunohost.multimedia/polsoc/Politics & Society/TweetCollection/'
import sys
funs = wd+"funs"
sys.path.insert(1, funs)
import CleanTweets
# datafile input directory
di = "data/IN/"
# Tweet-datafile output directory
ud = "data/OUT/"
# Training CSV dataset
twtCSV = "SenatorsTweets-Training_WORKING-COPY-correct"
twtCSVtrainCovClass = "SenatorsTweets-train-CovClassification"
twtCSVtrainFakeClass = "SenatorsTweets-train-FakeClassification"
# Name of new datafile generated
senCSVprep = "SenatorsTweets-Training_WORKING-COPY-prepared"
# don't change this one
twtCSVPath = wd + ud + twtCSV + ".csv"
twtCSVtrainCovClassPath = wd + ud + twtCSVtrainCovClass + ".csv" # may be unnecessary
twtCSVtrainFakeClassPath = wd + ud + twtCSVtrainFakeClass + ".csv" # may be unnecessary
twtCSVtrainCovClassPathTrain = wd + ud + twtCSVtrainCovClass + "TRAIN.csv" # may be unnecessary
twtCSVtrainFakeClassPathTrain = wd + ud + twtCSVtrainFakeClass + "TRAIN.csv" # may be unnecessary
twtTSVtrainCovClassPathTrain = wd + ud + "cov-train.tsv"
twtTSVtrainFakeClassPathTrain = wd + ud + "fake-train.tsv"
twtTSVtrainCovClassPathEval = wd + ud + "cov-eval.tsv" # may be unnecessary
twtTSVtrainFakeClassPathEval = wd + ud + "fake-eval.tsv" # may be unnecessary
model_name = 'digitalepidemiologylab/covid-twitter-bert-v2'
tokenizer = AutoTokenizer.from_pretrained(model_name)
max_length = 64 # max token sentence length
##
# Create training and testing dataset
dfTest = pd.read_csv(twtCSVPath, dtype=(object), delimiter=";")
dfTest = dfTest[:-800] # remove last 800 rows
dfTest = dfTest.iloc[:,:-3] # remove last 3 columns
dfTest['text'] = dfTest['rawContent'].apply(CleanTweets.preprocess_text)
dfTest.drop(columns=['rawContent'], inplace=True)
# Only keep tweets that are longer than 3 words
dfTest['tweet_proc_length'] = [len(text.split(' ')) for text in dfTest['text']]
dfTest['tweet_proc_length'].value_counts()
dfTest = dfTest[dfTest['tweet_proc_length']>3]
dfTest = dfTest.drop_duplicates(subset=['text'])
# Create datasets for each classification
dfCovClass = dfTest
dfCovClass = dfCovClass.drop(columns=['fake', 'date', 'Unnamed: 0']) # fake column not needed for the Covid-topic classification data
dfFakeClass = dfTest
dfFakeClass = dfFakeClass.drop(columns=['topicCovid', 'date', 'Unnamed: 0']) # topicCovid column not needed for the fake-news classification data
#type_map = {'Covid tweet': 'covid tweets', 'Noncovid tweet': 'noncovid tweet'}
dfCovClass.rename(index = str, columns={'topicCovid': 'labels', 'tid': 'id'}, inplace = True)
dfCovClass.labels = dfCovClass.labels.replace({"True": 'Covid', "False": 'NonCovid'})
#type_map = {'fake news tweet': 'fake news tweet', 'non-fake-news-tweet': 'non-fake-news-tweet'}
dfFakeClass.rename(index = str, columns={'fake': 'labels', 'tid': 'id'}, inplace = True)
dfFakeClass.labels = dfFakeClass.labels.replace({"True": 'Fake', "False": 'True'})
##
# Tokenize tweets
#dfCovClass['input_ids'] = dfCovClass['text'].apply(lambda x: tokenizer(x, max_length=max_length, padding="max_length",)['input_ids'])
#dfFakeClass['input_ids'] = dfFakeClass['text'].apply(lambda x: tokenizer(x, max_length=max_length, padding="max_length",)['input_ids'])
def encode_labels(label):
    if label == 'Covid':
        return 1
    elif label == 'NonCovid':
        return 0
    elif label == 'Fake':
        return 1
    elif label == 'True':
        return 0
    return 0
dfCovClass['labels_encoded'] = dfCovClass['labels'].apply(encode_labels)
dfFakeClass['labels_encoded'] = dfFakeClass['labels'].apply(encode_labels)
#save dfs as csvs
dfCovClass = dfCovClass.drop(columns=['tweet_proc_length'])
dfCovClass[200:1000].reset_index(drop=True).to_csv(twtCSVtrainCovClassPathTrain, encoding='utf-8', sep=";")
dfCovClass[200:1000].reset_index(drop=True).to_csv(twtTSVtrainCovClassPathTrain, encoding='utf-8', sep="\t")
dfCovClass[0:199].reset_index(drop=True).to_csv(twtCSVtrainCovClassPath, encoding='utf-8', sep=";")
dfCovClass[0:199].reset_index(drop=True).to_csv(twtTSVtrainCovClassPathEval, encoding='utf-8', sep="\t")
dfFakeClass = dfFakeClass.drop(columns=['tweet_proc_length'])
dfFakeClass[200:1000].reset_index(drop=True).to_csv(twtCSVtrainFakeClassPathTrain, encoding='utf-8', sep=";")
dfFakeClass[200:1000].reset_index(drop=True).to_csv(twtTSVtrainFakeClassPathTrain, encoding='utf-8', sep="\t")
dfFakeClass[0:199].reset_index(drop=True).to_csv(twtCSVtrainFakeClassPath, encoding='utf-8', sep=";")
dfFakeClass[0:199].reset_index(drop=True).to_csv(twtTSVtrainFakeClassPathEval, encoding='utf-8', sep="\t")
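# Alternative split sketch (assumption, not part of the committed script): scikit-learn's
# train_test_split could draw a label-stratified train/eval split instead of the fixed
# 0:199 / 200:1000 slices used above, e.g. for the Covid-topic data:
# from sklearn.model_selection import train_test_split
# covTrain, covEval = train_test_split(dfCovClass, test_size=0.2,
#                                      stratify=dfCovClass['labels_encoded'], random_state=12355)
# covTrain.to_csv(twtTSVtrainCovClassPathTrain, encoding='utf-8', sep="\t")
# covEval.to_csv(twtTSVtrainCovClassPathEval, encoding='utf-8', sep="\t")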
##
# Prepare trainer
from transformers import TrainingArguments
training_args = TrainingArguments(
    # report_to = 'wandb',
    output_dir=wd+'results',          # output directory
    overwrite_output_dir = True,
    num_train_epochs=3,               # total number of training epochs
    per_device_train_batch_size=8,    # batch size per device during training
    per_device_eval_batch_size=16,    # batch size for evaluation
    learning_rate=2e-5,
    warmup_steps=1000,                # number of warmup steps for learning rate scheduler
    weight_decay=0.01,                # strength of weight decay
    logging_dir='./logs3',            # directory for storing logs
    logging_steps=1000,
    evaluation_strategy="epoch",
    save_strategy="epoch",
    load_best_model_at_end=True
)
tokenizer = AutoTokenizer.from_pretrained(model_name)
from transformers import BertForSequenceClassification, AdamW, BertConfig
from torch.utils.data import TensorDataset, random_split
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
"""
train_dataset = load_dataset('csv', data_files={'train': twtCSVtrainCovClassPathTrain}, encoding = "utf-8")
train_dataset = train_dataset['train']
eval_dataset = load_dataset('csv', data_files={'test': twtCSVtrainCovClassPath}, encoding = "utf-8")
eval_dataset = eval_dataset['test']
"""
batch_size = 16
from torch.utils.data import Dataset
class PandasDataset(Dataset):
    def __init__(self, dataframe, tokenizer, max_length):
        self.dataframe = dataframe
        self.tokenizer = tokenizer
        self.max_length = max_length

    def __len__(self):
        return len(self.dataframe)

    def __getitem__(self, index):
        row = self.dataframe.iloc[index]
        text = row['text']
        labels = row['labels_encoded']
        encoded = self.tokenizer(text, max_length=self.max_length, padding="max_length", truncation=True)
        input_ids = torch.tensor(encoded['input_ids'])
        attention_mask = torch.tensor(encoded['attention_mask'])
        return {
            'input_ids': input_ids,
            'attention_mask': attention_mask,
            'labels': torch.tensor(labels)  # assuming labels are already encoded
        }
df = pd.read_csv(twtCSVtrainCovClassPathTrain, delimiter=";")
train_dataset = PandasDataset(df, tokenizer, max_length)
train_dataloader = DataLoader(
    train_dataset,
    sampler=RandomSampler(train_dataset),
    batch_size=batch_size
)
df = pd.read_csv(twtCSVtrainCovClassPath, delimiter=";")
eval_dataset = PandasDataset(df, tokenizer, max_length)
validation_dataloader = DataLoader(
    eval_dataset,
    sampler=SequentialSampler(eval_dataset),
    batch_size=batch_size
)
for idx, batch in enumerate(train_dataloader):
    print('Batch index: ', idx)
    print('Batch size: ', batch['input_ids'].size())  # access 'input_ids' field
    print('Batch label: ', batch['labels'])            # access 'labels' field
    break
model = BertForSequenceClassification.from_pretrained(
    "digitalepidemiologylab/covid-twitter-bert-v2",  # COVID-Twitter-BERT v2 checkpoint (BERT-large based, not the 12-layer bert-base from the tutorial)
    num_labels = 2,                # the number of output labels--2 for binary classification
                                   # (increase this for multi-class tasks)
    output_attentions = False,     # whether the model returns attention weights
    output_hidden_states = False,  # whether the model returns all hidden states
)
#trainer = Trainer(
# model=model, # the instantiated 🤗 Transformers model to be trained
# args=training_args, # training arguments, defined above
# train_dataset=train_dataset, # training dataset
# eval_dataset=eval_dataset # evaluation dataset
#)
# Note: AdamW is a class from the huggingface library (as opposed to pytorch)
# I believe the 'W' stands for 'Weight Decay fix'
optimizer = AdamW(model.parameters(),
                  lr = 2e-5,  # args.learning_rate - default is 5e-5, our notebook had 2e-5
                  eps = 1e-8  # args.adam_epsilon - default is 1e-8
                  )
from transformers import get_linear_schedule_with_warmup
# Number of training epochs. The BERT authors recommend between 2 and 4.
# We chose to run for 4, but we'll see later that this may be over-fitting the
# training data.
epochs = 4
# Total number of training steps is [number of batches] x [number of epochs].
# (Note that this is not the same as the number of training samples).
total_steps = len(train_dataloader) * epochs
# Create the learning rate scheduler.
scheduler = get_linear_schedule_with_warmup(optimizer,
                                            num_warmup_steps = 0,  # default value in run_glue.py
                                            num_training_steps = total_steps)
import numpy as np
# Function to calculate the accuracy of our predictions vs labels
def flat_accuracy(preds, labels):
    pred_flat = np.argmax(preds, axis=1).flatten()
    labels_flat = labels.flatten()
    return np.sum(pred_flat == labels_flat) / len(labels_flat)
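# Illustrative check (not part of the committed script): with logits
# [[0.1, 0.9], [2.0, 0.3]] the argmax predictions are [1, 0]; against labels [1, 1]
# exactly one matches, so flat_accuracy returns 0.5:
# flat_accuracy(np.array([[0.1, 0.9], [2.0, 0.3]]), np.array([1, 1]))  # -> 0.5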
import time
import datetime
def format_time(elapsed):
    '''
    Takes a time in seconds and returns a string hh:mm:ss
    '''
    # Round to the nearest second.
    elapsed_rounded = int(round((elapsed)))

    # Format as hh:mm:ss
    return str(datetime.timedelta(seconds=elapsed_rounded))
import random
import numpy as np
# This training code is based on the `run_glue.py` script here:
# https://github.com/huggingface/transformers/blob/5bfcd0485ece086ebcbed2d008813037968a9e58/examples/run_glue.py#L128
# Set the seed value all over the place to make this reproducible.
seed_val = 12355
# If there's a GPU available...
if torch.cuda.is_available():
    # Tell PyTorch to use the GPU.
    device = torch.device("cuda")
    print('There are %d GPU(s) available.' % torch.cuda.device_count())
    print('We will use the GPU:', torch.cuda.get_device_name(0))
    #model.cuda()
# If not...
else:
    print('No GPU available, using the CPU instead.')
    device = torch.device("cpu")
device = torch.device("cpu")  # note: this unconditionally falls back to the CPU (model.cuda() above is commented out)
random.seed(seed_val)
np.random.seed(seed_val)
torch.manual_seed(seed_val)
torch.cuda.manual_seed_all(seed_val)
# We'll store a number of quantities such as training and validation loss,
# validation accuracy, and timings.
training_stats = []
# Measure the total training time for the whole run.
total_t0 = time.time()
# For each epoch...
for epoch_i in range(0, epochs):

    # ========================================
    #               Training
    # ========================================

    # Perform one full pass over the training set.

    print("")
    print('======== Epoch {:} / {:} ========'.format(epoch_i + 1, epochs))
    print('Training...')

    # Measure how long the training epoch takes.
    t0 = time.time()

    # Reset the total loss for this epoch.
    total_train_loss = 0

    # Put the model into training mode. Don't be misled--the call to
    # `train` just changes the *mode*, it doesn't *perform* the training.
    # `dropout` and `batchnorm` layers behave differently during training
    # vs. test (source: https://stackoverflow.com/questions/51433378/what-does-model-train-do-in-pytorch)
    model.train()

    # For each batch of training data...
    for step, batch in enumerate(train_dataloader):

        # Progress update every 40 batches.
        if step % 40 == 0 and not step == 0:
            # Calculate elapsed time in minutes.
            elapsed = format_time(time.time() - t0)

            # Report progress.
            print('  Batch {:>5,}  of  {:>5,}.    Elapsed: {:}.'.format(step, len(train_dataloader), elapsed))

        # Unpack this training batch from our dataloader.
        #
        # As we unpack the batch, we'll also copy each tensor to the GPU using the
        # `to` method.
        #
        # `batch` contains three pytorch tensors:
        #   [0]: input ids
        #   [1]: attention masks
        #   [2]: labels
        print("Batch keys:", batch.keys())
        b_input_ids = batch['input_ids'].to(device)
        b_input_mask = batch['attention_mask'].to(device)
        b_labels = batch['labels'].to(device)

        # Always clear any previously calculated gradients before performing a
        # backward pass. PyTorch doesn't do this automatically because
        # accumulating the gradients is "convenient while training RNNs".
        # (source: https://stackoverflow.com/questions/48001598/why-do-we-need-to-call-zero-grad-in-pytorch)
        model.zero_grad()

        # Perform a forward pass (evaluate the model on this training batch).
        # The documentation for this `model` function is here:
        # https://huggingface.co/transformers/v2.2.0/model_doc/bert.html#transformers.BertForSequenceClassification
        # It returns different numbers of parameters depending on what arguments
        # are given and what flags are set. For our usage here, it returns
        # the loss (because we provided labels) and the "logits"--the model
        # outputs prior to activation.
        output = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask, labels=b_labels)
        loss = output[0]
        logits = output[1]

        # Accumulate the training loss over all of the batches so that we can
        # calculate the average loss at the end. `loss` is a Tensor containing a
        # single value; the `.item()` function just returns the Python value
        # from the tensor.
        total_train_loss += loss.item()

        # Perform a backward pass to calculate the gradients.
        loss.backward()

        # Clip the norm of the gradients to 1.0.
        # This is to help prevent the "exploding gradients" problem.
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)

        # Update parameters and take a step using the computed gradient.
        # The optimizer dictates the "update rule"--how the parameters are
        # modified based on their gradients, the learning rate, etc.
        optimizer.step()

        # Update the learning rate.
        scheduler.step()

    # Calculate the average loss over all of the batches.
    avg_train_loss = total_train_loss / len(train_dataloader)

    # Measure how long this epoch took.
    training_time = format_time(time.time() - t0)

    print("")
    print("  Average training loss: {0:.2f}".format(avg_train_loss))
    print("  Training epoch took: {:}".format(training_time))

    # ========================================
    #               Validation
    # ========================================

    # After the completion of each training epoch, measure our performance on
    # our validation set.

    print("")
    print("Running Validation...")

    t0 = time.time()

    # Put the model in evaluation mode--the dropout layers behave differently
    # during evaluation.
    model.eval()

    # Tracking variables
    total_eval_accuracy = 0
    total_eval_loss = 0
    nb_eval_steps = 0

    # Evaluate data for one epoch
    for batch in validation_dataloader:

        # Unpack this training batch from our dataloader.
        #
        # As we unpack the batch, we'll also copy each tensor to the GPU using
        # the `to` method.
        #
        # `batch` contains three pytorch tensors:
        #   [0]: input ids
        #   [1]: attention masks
        #   [2]: labels
        b_input_ids = batch['input_ids'].to(device)
        b_input_mask = batch['attention_mask'].to(device)
        b_labels = batch['labels'].to(device)

        # Tell pytorch not to bother with constructing the compute graph during
        # the forward pass, since this is only needed for backprop (training).
        with torch.no_grad():

            # Forward pass, calculate logit predictions.
            # token_type_ids is the same as the "segment ids", which
            # differentiates sentence 1 and 2 in 2-sentence tasks.
            # The documentation for this `model` function is here:
            # https://huggingface.co/transformers/v2.2.0/model_doc/bert.html#transformers.BertForSequenceClassification
            # Get the "logits" output by the model. The "logits" are the output
            # values prior to applying an activation function like the softmax.
            output = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask, labels=b_labels)
            loss = output[0]
            logits = output[1]

        # Accumulate the validation loss.
        total_eval_loss += loss.item()

        # Move logits and labels to CPU
        logits = logits.detach().cpu().numpy()
        label_ids = b_labels.to('cpu').numpy()

        # Calculate the accuracy for this batch of test sentences, and
        # accumulate it over all batches.
        total_eval_accuracy += flat_accuracy(logits, label_ids)

    # Report the final accuracy for this validation run.
    avg_val_accuracy = total_eval_accuracy / len(validation_dataloader)
    print("  Accuracy: {0:.2f}".format(avg_val_accuracy))

    # Calculate the average loss over all of the batches.
    avg_val_loss = total_eval_loss / len(validation_dataloader)

    # Measure how long the validation run took.
    validation_time = format_time(time.time() - t0)

    print("  Validation Loss: {0:.2f}".format(avg_val_loss))
    print("  Validation took: {:}".format(validation_time))

    # Record all statistics from this epoch.
    training_stats.append(
        {
            'epoch': epoch_i + 1,
            'Training Loss': avg_train_loss,
            'Valid. Loss': avg_val_loss,
            'Valid. Accur.': avg_val_accuracy,
            'Training Time': training_time,
            'Validation Time': validation_time
        }
    )

print("")
print("Training complete!")
print("Total training took {:} (h:mm:ss)".format(format_time(time.time()-total_t0)))
params = list(model.named_parameters())
print('The BERT model has {:} different named parameters.\n'.format(len(params)))
print('==== Embedding Layer ====\n')
for p in params[0:5]:
    print("{:<55} {:>12}".format(p[0], str(tuple(p[1].size()))))

print('\n==== First Transformer ====\n')

for p in params[5:21]:
    print("{:<55} {:>12}".format(p[0], str(tuple(p[1].size()))))

print('\n==== Output Layer ====\n')

for p in params[-4:]:
    print("{:<55} {:>12}".format(p[0], str(tuple(p[1].size()))))
import os
# Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
output_dir = wd + 'model_save/'
# Create output directory if needed
if not os.path.exists(output_dir):
    os.makedirs(output_dir)
print("Saving model to %s" % output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
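# Reload sketch (assumption, not part of the committed script): the artefacts saved
# above can later be restored with from_pretrained():
# model = BertForSequenceClassification.from_pretrained(output_dir)
# tokenizer = AutoTokenizer.from_pretrained(output_dir)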
# Good practice: save your training arguments together with the trained model
# torch.save(args, os.path.join(output_dir, 'training_args.bin'))
import pandas as pd
# Display floats with two decimal places.
pd.set_option('display.precision', 2)
# Create a DataFrame from our training statistics.
df_stats = pd.DataFrame(data=training_stats)
# Use the 'epoch' as the row index.
df_stats = df_stats.set_index('epoch')
# A hack to force the column headers to wrap.
#df = df.style.set_table_styles([dict(selector="th",props=[('max-width', '70px')])])
# Display the table.
df_stats
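# Follow-up sketch (assumption, not part of the committed script): plotting the
# per-epoch statistics collected above helps spot over-fitting:
# import matplotlib.pyplot as plt
# df_stats[['Training Loss', 'Valid. Loss']].plot(marker='o')
# plt.xlabel('epoch'); plt.ylabel('loss'); plt.show()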