{
"cells": [
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"WARNING:tensorflow:From C:\\Users\\franz\\AppData\\Local\\Temp\\ipykernel_16992\\1198363771.py:6: is_gpu_available (from tensorflow.python.framework.test_util) is deprecated and will be removed in a future version.\n",
"Instructions for updating:\n",
"Use `tf.config.list_physical_devices('GPU')` instead.\n",
"GPU is available\n"
]
}
],
"source": [
"import gpt_2_simple as gpt2\n",
"import os\n",
"import tensorflow as tf\n",
"import pandas as pd\n",
"import re\n",
"\n",
"# tf.test.is_gpu_available() is deprecated (see the warning this cell printed);\n",
"# tf.config.list_physical_devices('GPU') is the supported replacement and\n",
"# returns a (possibly empty) list, so its truthiness gives the same answer.\n",
"print(\"GPU is\", \"available\" if tf.config.list_physical_devices('GPU') else \"NOT AVAILABLE\")"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"# Fetch the 124M GPT-2 base checkpoint into ./models/124M unless it is\n",
"# already present; gpt2.download_gpt2 pulls it from the public release.\n",
"model_name = \"124M\"\n",
"if not os.path.isdir(os.path.join(\"models\", model_name)):\n",
"\tprint(f\"Downloading {model_name} model...\")\n",
"\tgpt2.download_gpt2(model_name=model_name) "
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"# Project layout: fine-tuned weights and training corpora live under\n",
"# AbstractGenerator/. NOTE(review): 'TrainigData' and 'sufix' are misspelled,\n",
"# but the names are reused by later cells (and the directory may already\n",
"# exist on disk with this spelling), so they are deliberately kept as-is.\n",
"path = 'AbstractGenerator/'\n",
"checkpoint_dir =path+'weights/'\n",
"data_path = path+'TrainigData/'\n",
"\n",
"\n",
"\n",
"# Base names (without extension) of the English / Spanish training files.\n",
"file_name_en = 'en'\n",
"file_path_en = data_path+file_name_en\n",
"\n",
"file_name_es = 'es'\n",
"file_path_es = data_path+file_name_es\n",
"\n",
"\n",
"# Record delimiters used both when building the corpus and when truncating\n",
"# generated samples.\n",
"prefix= '<|startoftext|>'\n",
"sufix ='<|endoftext|>'"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# create training data"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"# Forward slashes work on Windows too and avoid the invalid '\\s' escape\n",
"# sequence that a bare backslash inside a normal string literal produces\n",
"# (a DeprecationWarning today, a SyntaxError in future Python versions).\n",
"en = pd.read_csv('CSV/scientific_paper_en.csv')\n",
"es = pd.read_csv('CSV/scientific_paper_es.csv')"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"import codecs\n",
"\n",
"def createTrainingData(ds, fileName='resumen.txt', path='TrainigData/',\n",
"                       prefix='<|startoftext|>', sufix='<|endoftext|>'):\n",
"    \"\"\"Append one prefix/ABSTRACT/sufix record per row of `ds` to path+fileName.\n",
"\n",
"    ds must have 'text_no_abstract' and 'abstract' string columns. prefix and\n",
"    sufix default to the notebook's global token values, so existing calls\n",
"    behave the same. The file is opened in append mode, so re-running this\n",
"    duplicates records -- delete the output file first for a clean rebuild.\n",
"    \"\"\"\n",
"    with codecs.open(path + fileName, 'a', 'utf-8') as f:\n",
"        # Label-based .loc instead of .iloc: ds.index yields labels, which\n",
"        # only coincide with positions when the index is a default RangeIndex\n",
"        # (e.g. after filtering, iloc would read the wrong rows or raise).\n",
"        for i in ds.index:\n",
"            f.write(prefix + \"\\n\")\n",
"            f.write(ds.loc[i, 'text_no_abstract'])\n",
"            f.write(\"ABSTRACT\\n\")\n",
"            f.write(ds.loc[i, 'abstract'] + \"\\n\")\n",
"            # Trailing newline so the end token does not fuse with the next\n",
"            # record's start token on a single line.\n",
"            f.write(sufix + \"\\n\")"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"# Build the English and Spanish fine-tuning corpora under data_path.\n",
"# NOTE(review): createTrainingData opens in append mode, so re-running this\n",
"# cell duplicates every record in the output files.\n",
"createTrainingData(en,'en.txt',data_path)\n",
"createTrainingData(es,'es.txt',data_path)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# pretrained"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Start a TF1-style session and load the previously fine-tuned weights\n",
"# ('run1') from checkpoint_dir, skipping the training cells below.\n",
"sess = gpt2.start_tf_sess()\n",
"gpt2.load_gpt2(sess,checkpoint_dir=checkpoint_dir,run_name='run1')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# train "
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {},
"outputs": [],
"source": [
"# Reset the default graph before fine-tuning: gpt-2-simple builds TF1-style\n",
"# graphs, and stale graph state from earlier load/generate cells would\n",
"# otherwise collide with the variables finetune() creates.\n",
"tf.compat.v1.reset_default_graph()\n",
"sess = gpt2.start_tf_sess()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## en"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Fine-tune the base model on the English corpus for 1000 steps; checkpoints\n",
"# are written under checkpoint_dir (default run name 'run1').\n",
"gpt2.finetune(sess, file_path_en + '.txt', model_name=model_name, checkpoint_dir=checkpoint_dir, steps=1000)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## es"
]
},
{
"cell_type": "code",
"execution_count": 17,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Loading checkpoint models\\124M\\model.ckpt\n",
"INFO:tensorflow:Restoring parameters from models\\124M\\model.ckpt\n",
"Loading dataset...\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|██████████| 1/1 [00:51<00:00, 51.03s/it]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"dataset has 17511492 tokens\n",
"Training...\n"
]
},
{
"ename": "ResourceExhaustedError",
"evalue": "Graph execution error:\n\nfailed to allocate memory\n\t [[{{node model/h10/attn/ArithmeticOptimizer/ReorderCastLikeAndValuePreserving_float_Cast_1}}]]\nHint: If you want to see a list of allocated tensors when OOM happens, add report_tensor_allocations_upon_oom to RunOptions for current allocation info. This isn't available when running in Eager mode.",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31mResourceExhaustedError\u001b[0m Traceback (most recent call last)",
"File \u001b[1;32m~\\.conda\\envs\\tf-gpu\\lib\\site-packages\\tensorflow\\python\\client\\session.py:1377\u001b[0m, in \u001b[0;36mBaseSession._do_call\u001b[1;34m(self, fn, *args)\u001b[0m\n\u001b[0;32m 1376\u001b[0m \u001b[39mtry\u001b[39;00m:\n\u001b[1;32m-> 1377\u001b[0m \u001b[39mreturn\u001b[39;00m fn(\u001b[39m*\u001b[39;49margs)\n\u001b[0;32m 1378\u001b[0m \u001b[39mexcept\u001b[39;00m errors\u001b[39m.\u001b[39mOpError \u001b[39mas\u001b[39;00m e:\n",
"File \u001b[1;32m~\\.conda\\envs\\tf-gpu\\lib\\site-packages\\tensorflow\\python\\client\\session.py:1360\u001b[0m, in \u001b[0;36mBaseSession._do_run.._run_fn\u001b[1;34m(feed_dict, fetch_list, target_list, options, run_metadata)\u001b[0m\n\u001b[0;32m 1359\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_extend_graph()\n\u001b[1;32m-> 1360\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_call_tf_sessionrun(options, feed_dict, fetch_list,\n\u001b[0;32m 1361\u001b[0m target_list, run_metadata)\n",
"File \u001b[1;32m~\\.conda\\envs\\tf-gpu\\lib\\site-packages\\tensorflow\\python\\client\\session.py:1453\u001b[0m, in \u001b[0;36mBaseSession._call_tf_sessionrun\u001b[1;34m(self, options, feed_dict, fetch_list, target_list, run_metadata)\u001b[0m\n\u001b[0;32m 1451\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39m_call_tf_sessionrun\u001b[39m(\u001b[39mself\u001b[39m, options, feed_dict, fetch_list, target_list,\n\u001b[0;32m 1452\u001b[0m run_metadata):\n\u001b[1;32m-> 1453\u001b[0m \u001b[39mreturn\u001b[39;00m tf_session\u001b[39m.\u001b[39;49mTF_SessionRun_wrapper(\u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_session, options, feed_dict,\n\u001b[0;32m 1454\u001b[0m fetch_list, target_list,\n\u001b[0;32m 1455\u001b[0m run_metadata)\n",
"\u001b[1;31mResourceExhaustedError\u001b[0m: failed to allocate memory\n\t [[{{node model/h10/attn/ArithmeticOptimizer/ReorderCastLikeAndValuePreserving_float_Cast_1}}]]\nHint: If you want to see a list of allocated tensors when OOM happens, add report_tensor_allocations_upon_oom to RunOptions for current allocation info. This isn't available when running in Eager mode.\n",
"\nDuring handling of the above exception, another exception occurred:\n",
"\u001b[1;31mResourceExhaustedError\u001b[0m Traceback (most recent call last)",
"\u001b[1;32mc:\\Users\\franz\\OneDrive\\Documentos\\GitHub\\Generador-de-abstracts\\AbstractGenerator.ipynb Cell 15'\u001b[0m in \u001b[0;36m\u001b[1;34m\u001b[0m\n\u001b[1;32m----> 1\u001b[0m gpt2\u001b[39m.\u001b[39;49mfinetune(sess,\n\u001b[0;32m 2\u001b[0m file_path_es\u001b[39m+\u001b[39;49m\u001b[39m'\u001b[39;49m\u001b[39m.txt\u001b[39;49m\u001b[39m'\u001b[39;49m,\n\u001b[0;32m 3\u001b[0m model_name\u001b[39m=\u001b[39;49mmodel_name,\n\u001b[0;32m 4\u001b[0m checkpoint_dir\u001b[39m=\u001b[39;49mcheckpoint_dir, \n\u001b[0;32m 5\u001b[0m steps\u001b[39m=\u001b[39;49m\u001b[39m1000\u001b[39;49m\n\u001b[0;32m 6\u001b[0m )\n",
"File \u001b[1;32m~\\.conda\\envs\\tf-gpu\\lib\\site-packages\\gpt_2_simple\\gpt_2.py:339\u001b[0m, in \u001b[0;36mfinetune\u001b[1;34m(sess, dataset, steps, model_name, model_dir, combine, batch_size, learning_rate, accumulate_gradients, restore_from, run_name, checkpoint_dir, sample_every, sample_length, sample_num, multi_gpu, save_every, print_every, max_checkpoints, use_memory_saving_gradients, only_train_transformer_layers, optimizer, overwrite, reuse)\u001b[0m\n\u001b[0;32m 337\u001b[0m sess\u001b[39m.\u001b[39mrun(opt_reset)\n\u001b[0;32m 338\u001b[0m \u001b[39mfor\u001b[39;00m _ \u001b[39min\u001b[39;00m \u001b[39mrange\u001b[39m(accumulate_gradients):\n\u001b[1;32m--> 339\u001b[0m sess\u001b[39m.\u001b[39;49mrun(\n\u001b[0;32m 340\u001b[0m opt_compute, feed_dict\u001b[39m=\u001b[39;49m{context: sample_batch()})\n\u001b[0;32m 341\u001b[0m (v_loss, v_summary) \u001b[39m=\u001b[39m sess\u001b[39m.\u001b[39mrun((opt_apply, summary_loss))\n\u001b[0;32m 342\u001b[0m \u001b[39melse\u001b[39;00m:\n",
"File \u001b[1;32m~\\.conda\\envs\\tf-gpu\\lib\\site-packages\\tensorflow\\python\\client\\session.py:967\u001b[0m, in \u001b[0;36mBaseSession.run\u001b[1;34m(self, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[0;32m 964\u001b[0m run_metadata_ptr \u001b[39m=\u001b[39m tf_session\u001b[39m.\u001b[39mTF_NewBuffer() \u001b[39mif\u001b[39;00m run_metadata \u001b[39melse\u001b[39;00m \u001b[39mNone\u001b[39;00m\n\u001b[0;32m 966\u001b[0m \u001b[39mtry\u001b[39;00m:\n\u001b[1;32m--> 967\u001b[0m result \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_run(\u001b[39mNone\u001b[39;49;00m, fetches, feed_dict, options_ptr,\n\u001b[0;32m 968\u001b[0m run_metadata_ptr)\n\u001b[0;32m 969\u001b[0m \u001b[39mif\u001b[39;00m run_metadata:\n\u001b[0;32m 970\u001b[0m proto_data \u001b[39m=\u001b[39m tf_session\u001b[39m.\u001b[39mTF_GetBuffer(run_metadata_ptr)\n",
"File \u001b[1;32m~\\.conda\\envs\\tf-gpu\\lib\\site-packages\\tensorflow\\python\\client\\session.py:1190\u001b[0m, in \u001b[0;36mBaseSession._run\u001b[1;34m(self, handle, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[0;32m 1187\u001b[0m \u001b[39m# We only want to really perform the run if fetches or targets are provided,\u001b[39;00m\n\u001b[0;32m 1188\u001b[0m \u001b[39m# or if the call is a partial run that specifies feeds.\u001b[39;00m\n\u001b[0;32m 1189\u001b[0m \u001b[39mif\u001b[39;00m final_fetches \u001b[39mor\u001b[39;00m final_targets \u001b[39mor\u001b[39;00m (handle \u001b[39mand\u001b[39;00m feed_dict_tensor):\n\u001b[1;32m-> 1190\u001b[0m results \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_do_run(handle, final_targets, final_fetches,\n\u001b[0;32m 1191\u001b[0m feed_dict_tensor, options, run_metadata)\n\u001b[0;32m 1192\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[0;32m 1193\u001b[0m results \u001b[39m=\u001b[39m []\n",
"File \u001b[1;32m~\\.conda\\envs\\tf-gpu\\lib\\site-packages\\tensorflow\\python\\client\\session.py:1370\u001b[0m, in \u001b[0;36mBaseSession._do_run\u001b[1;34m(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)\u001b[0m\n\u001b[0;32m 1367\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_call_tf_sessionprun(handle, feed_dict, fetch_list)\n\u001b[0;32m 1369\u001b[0m \u001b[39mif\u001b[39;00m handle \u001b[39mis\u001b[39;00m \u001b[39mNone\u001b[39;00m:\n\u001b[1;32m-> 1370\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_do_call(_run_fn, feeds, fetches, targets, options,\n\u001b[0;32m 1371\u001b[0m run_metadata)\n\u001b[0;32m 1372\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[0;32m 1373\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_do_call(_prun_fn, handle, feeds, fetches)\n",
"File \u001b[1;32m~\\.conda\\envs\\tf-gpu\\lib\\site-packages\\tensorflow\\python\\client\\session.py:1396\u001b[0m, in \u001b[0;36mBaseSession._do_call\u001b[1;34m(self, fn, *args)\u001b[0m\n\u001b[0;32m 1391\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39m'\u001b[39m\u001b[39monly supports NHWC tensor format\u001b[39m\u001b[39m'\u001b[39m \u001b[39min\u001b[39;00m message:\n\u001b[0;32m 1392\u001b[0m message \u001b[39m+\u001b[39m\u001b[39m=\u001b[39m (\u001b[39m'\u001b[39m\u001b[39m\\n\u001b[39;00m\u001b[39mA possible workaround: Try disabling Grappler optimizer\u001b[39m\u001b[39m'\u001b[39m\n\u001b[0;32m 1393\u001b[0m \u001b[39m'\u001b[39m\u001b[39m\\n\u001b[39;00m\u001b[39mby modifying the config for creating the session eg.\u001b[39m\u001b[39m'\u001b[39m\n\u001b[0;32m 1394\u001b[0m \u001b[39m'\u001b[39m\u001b[39m\\n\u001b[39;00m\u001b[39msession_config.graph_options.rewrite_options.\u001b[39m\u001b[39m'\u001b[39m\n\u001b[0;32m 1395\u001b[0m \u001b[39m'\u001b[39m\u001b[39mdisable_meta_optimizer = True\u001b[39m\u001b[39m'\u001b[39m)\n\u001b[1;32m-> 1396\u001b[0m \u001b[39mraise\u001b[39;00m \u001b[39mtype\u001b[39m(e)(node_def, op, message)\n",
"\u001b[1;31mResourceExhaustedError\u001b[0m: Graph execution error:\n\nfailed to allocate memory\n\t [[{{node model/h10/attn/ArithmeticOptimizer/ReorderCastLikeAndValuePreserving_float_Cast_1}}]]\nHint: If you want to see a list of allocated tensors when OOM happens, add report_tensor_allocations_upon_oom to RunOptions for current allocation info. This isn't available when running in Eager mode."
]
}
],
"source": [
"# Fine-tune on the Spanish corpus (same settings as the English run).\n",
"# NOTE(review): the saved output shows this run crashed with a GPU\n",
"# ResourceExhaustedError (OOM) on this ~17.5M-token dataset.\n",
"gpt2.finetune(sess, file_path_es + '.txt', model_name=model_name, checkpoint_dir=checkpoint_dir, steps=1000)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# test"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## en "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"text = \"\"\"Introduction and preliminaries\n",
"The focus of this paper is decompositions of (k, `)-sparse graphs into edge-disjoint subgraphs\n",
"that certify sparsity. We use graph to mean a multigraph, possibly with loops. We say that a\n",
"graph is (k, `)-sparse if no subset of n′ vertices spans more than kn′− ` edges in the graph; a\n",
"(k, `)-sparse graph with kn′− ` edges is (k, `)-tight. We call the range k ≤ `≤ 2k−1 the upper\n",
"range of sparse graphs and 0≤ `≤ k the lower range.\n",
"In this paper, we present efficient algorithms for finding decompositions that certify sparsity\n",
"in the upper range of `. Our algorithms also apply in the lower range, which was already ad-\n",
"dressed by [3, 4, 5, 6, 19]. A decomposition certifies the sparsity of a graph if the sparse graphs\n",
"and graphs admitting the decomposition coincide.\n",
"Our algorithms are based on a new characterization of sparse graphs, which we call the\n",
"pebble game with colors. The pebble game with colors is a simple graph construction rule that\n",
"produces a sparse graph along with a sparsity-certifying decomposition.\n",
"We define and study a canonical class of pebble game constructions, which correspond to\n",
"previously studied decompositions of sparse graphs into edge disjoint trees. Our results provide\n",
"a unifying framework for all the previously known special cases, including Nash-Williams-\n",
"Tutte and [7, 24]. Indeed, in the lower range, canonical pebble game constructions capture the\n",
"properties of the augmenting paths used in matroid union and intersection algorithms[5, 6].\n",
"Since the sparse graphs in the upper range are not known to be unions or intersections of the\n",
"matroids for which there are efficient augmenting path algorithms, these do not easily apply in\n",
"∗ Research of both authors funded by the NSF under grants NSF CCF-0430990 and NSF-DARPA CARGO\n",
"CCR-0310661 to the first author.\n",
"2 Ileana Streinu, Louis Theran\n",
"Term Meaning\n",
"Sparse graph G Every non-empty subgraph on n′ vertices has ≤ kn′− ` edges\n",
"Tight graph G G = (V,E) is sparse and |V |= n, |E|= kn− `\n",
"Block H in G G is sparse, and H is a tight subgraph\n",
"Component H of G G is sparse and H is a maximal block\n",
"Map-graph Graph that admits an out-degree-exactly-one orientation\n",
"(k, `)-maps-and-trees Edge-disjoint union of ` trees and (k− `) map-grpahs\n",
"`Tk Union of ` trees, each vertex is in exactly k of them\n",
"Set of tree-pieces of an `Tk induced on V ′ ⊂V Pieces of trees in the `Tk spanned by E(V ′)\n",
"Proper `Tk Every V ′ ⊂V contains ≥ ` pieces of trees from the `Tk\n",
"Table 1. Sparse graph and decomposition terminology used in this paper.\n",
"the upper range. Pebble game with colors constructions may thus be considered a strengthening\n",
"of augmenting paths to the upper range of matroidal sparse graphs.\n",
"1.1. Sparse graphs\n",
"\n",
"ABSTRACT\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sample one completion seeded with the English paper body; truncate at the\n",
"# end-of-text token so only the generated abstract region is printed.\n",
"gpt2.generate(sess,prefix=text,truncate=sufix,checkpoint_dir=checkpoint_dir,nsamples=1)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## es"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"text = \"\"\"El foco de este documento son las descomposicións de (k, `)-sparse gráficos en bordes-disjunto subgraphs\n",
"que certifique la escasez. Usamos el gráfico para significar un múltiplo, posiblemente con bucles. Nosotros decimos que un\n",
"grafo es (k, `)-sparse si ningún subconjunto de n′ vértices abarca más de kn ` bordes en el gráfico; a\n",
"(k, `)-sparse gráfico con kn ` bordes es (k, `)-estrechado. Llamamos al rango k ≤ 2k−1 el superior\n",
"rango de gráficos escasos y 0≤ k el rango inferior.\n",
"En este artículo, presentamos algoritmos eficientes para encontrar descomposicións que certifiquen la escasez\n",
"en el rango superior de `. Nuestros algoritmos también se aplican en el rango inferior, que ya era ad-\n",
"vestido por [3, 4, 5, 6, 19]. Una descomposición certifica la escasez de un gráfico si los gráficos dispersos\n",
"y los gráficos que admiten la descomposición coinciden.\n",
"Nuestros algoritmos se basan en una nueva caracterización de gráficos escasos, que llamamos el\n",
"juego de guijarros con colores. El juego de guijarros con colores es una regla de construcción de gráficos simples que\n",
"produce un gráfico escaso junto con una descomposición certificadora de la escasez.\n",
"Definimos y estudiamos una clase canónica de construcciones de juego de guijarros, que corresponden a\n",
"previamente estudiado las descomposiciones de los gráficos escasos en los árboles disjuntos del borde. Nuestros resultados proporcionan\n",
"un marco unificador para todos los casos especiales conocidos anteriormente, incluidos Nash-Williams-\n",
"Tutte y [7, 24]. De hecho, en el rango inferior, las construcciones canónicas de juego de guijarros capturan la\n",
"propiedades de las rutas de aumento utilizadas en los algoritmos de unión de matroides y de intersección[5, 6].\n",
"Dado que los gráficos escasos en el rango superior no se sabe que son uniones o intersecciones de la\n",
"matroides para los que hay algoritmos de ruta de aumento eficiente, estos no se aplican fácilmente en\n",
"* Investigación de ambos autores financiada por la NSF bajo subvenciones NSF CCF-0430990 y NSF-DARPA CARGO\n",
"CCR-0310661 al primer autor.\n",
"2 Ileana Streinu, Louis Theran\n",
"Significado del término\n",
"Gráfico escaso G Cada subgrafo no vacío en n′ vértices tiene ≤ kn ` bordes\n",
"El gráfico ajustado G G = (V,E) es escaso y V = n, E= kn− `\n",
"El bloque H en G G es escaso, y H es un subgrafo apretado\n",
"El componente H de G G es escaso y H es un bloqueo máximo\n",
"Gráfico cartográfico que admite una orientación de grado-exactamente-uno\n",
"(k, `)-maps-and-trees Edge-disjunt union de ` árboles y (k- `) map-grpahs\n",
"`Tk Unión de ` árboles, cada vértice está exactamente en k de ellos\n",
"Conjunto de piezas arbóreas de un `Tk inducido en V ′ ́V Piezas de árboles en el `Tk extendido por E(V ′)\n",
"`Tk Apropiado Cada V ′ V contiene ≥ ` pedazos de árboles de la `Tk\n",
"Cuadro 1 Gráfico escaso y terminología de descomposición utilizada en este artículo.\n",
"el rango superior. Pebble juego con construcciones de colores por lo tanto puede ser considerado un fortalecimiento\n",
"de caminos de aumento a la gama superior de gráficos de la escasez matroidal.\n",
"1.1. Gráficos escasos\n",
"Un gráfico es (k, `)-sparse si para cualquier subgrafo no vacío con bordes m′ y n′ vértices, m′ ≤\n",
"kn `. Observamos que esta condición implica que 0 ≤ ` ≤ 2k− 1, y a partir de ahora en este\n",
"Haremos esta suposición. Un gráfico escaso que tiene n vértices y exactamente bordes kn\n",
"se llama apretado.\n",
"Para un gráfico G = (V,E), y V ′ V, utilizamos el intervalo de notación (V ′) para el número de bordes\n",
"en el subgráfico inducido por V ′. En un gráfico dirigido, out(V ′) es el número de bordes con la cola\n",
"en V ′ y la cabeza en V −V ′; para un subgráfico inducido por V ′, llamamos a tal borde un borde superior.\n",
"Hay dos tipos importantes de subgrafías de gráficos escasos. Un bloque es un subgrafo apretado de\n",
"un gráfico escaso. Un componente es un bloque máximo.\n",
"La Tabla 1 resume la escasa terminología gráfica utilizada en este artículo.\n",
"1.2. Descomposiciónes de certificación de la sparsidad\n",
"Un k-arborescencia es un gráfico que admite una descomposición en k borde-desjunto que abarca los árboles.\n",
"La Figura 1(a) muestra un ejemplo de una 3-arborescencia. Se describen los gráficos k-arborescentes\n",
"por los conocidos teoremas de Tutte [23] y Nash-Williams [17] como exactamente el (k,k) apretado\n",
"gráficos.\n",
"ABSTRACT\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sample one completion seeded with the Spanish paper body; truncate at the\n",
"# end-of-text token so only the generated abstract region is printed.\n",
"gpt2.generate(sess,prefix=text,truncate=sufix,checkpoint_dir=checkpoint_dir,nsamples=1)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# gradio interface"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import gradio as gr  # `gr` was used below but never imported anywhere in this notebook -> NameError\n",
"\n",
"def generateAbstract(text):\n",
"    \"\"\"Generate an abstract for `text` with the fine-tuned GPT-2 checkpoint.\n",
"\n",
"    Builds a fresh graph/session on every call so repeated invocations from\n",
"    the Gradio handler do not collide with stale TF1 graph state.\n",
"    Returns the generated text from the 'ABSTRACT' marker onward.\n",
"    \"\"\"\n",
"    tf.compat.v1.reset_default_graph()\n",
"    sess = gpt2.start_tf_sess()\n",
"    gpt2.load_gpt2(sess, checkpoint_dir=checkpoint_dir, run_name='run1')\n",
"    txt = gpt2.generate(sess, prefix=str(text)+\"\\nABSTRACT\", return_as_list=True, truncate=sufix, checkpoint_dir=checkpoint_dir, nsamples=1)[0]\n",
"    # Keep only the abstract portion (drop the echoed prompt before the marker).\n",
"    return str(txt[txt.find('ABSTRACT'):])\n",
"\n",
"iface = gr.Interface(fn=generateAbstract, inputs=gr.inputs.Textbox(lines=10, placeholder=\"text\"), outputs=\"textbox\")\n",
"iface.launch(debug=True)"
]
}
],
"metadata": {
"interpreter": {
"hash": "53fbdc69e3e12c371950068c144423682c30d04ec68c2bd46937202e33e0058d"
},
"kernelspec": {
"display_name": "Python 3.7.11 ('receta')",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.7"
},
"orig_nbformat": 4
},
"nbformat": 4,
"nbformat_minor": 2
}